Dataset schema (column name, type, value range):

repo_name           string   length 7-71
file_path           string   length 5-118
context             list
import_statement    string   length 45-12.5k
token_num           int64    641-99.4k
cropped_code        string   length 44-17k
all_code            string   length 43-754k
next_line           string   length 2-330
gold_snippet_index  int64    0-68
created_at          string   length 25 (fixed)
level               string   9 classes
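Each row is a single next-line completion example drawn from a public repository. Below is a minimal sketch of one record as a Python dictionary, with values abbreviated from the first sample row that follows; the comments on `gold_snippet_index` and `level` are inferences from the field names and sample values, not documented semantics.

```python
# Sketch of a single record (values abbreviated from the first sample row).
record = {
    "repo_name": "Angryrou/udao",
    "file_path": "udao/optimization/tests/moo/conftest.py",
    # Cross-file snippets from the same repository that the example may rely on.
    "context": [
        {
            "identifier": "TabularContainer",
            "path": "udao/data/containers/tabular_container.py",
            "snippet": "class TabularContainer(BaseContainer): ...",
        },
        # ... more {identifier, path, snippet} entries
    ],
    "import_statement": "from typing import Dict, Sequence\n...",
    "token_num": 11361,
    "cropped_code": "class ObjModel1(nn.Module): ...",  # in-file code preceding the target line
    "all_code": "class ObjModel1(nn.Module): ...",      # full file content
    "next_line": "variables: Dict[str, Variable] = {",  # ground-truth continuation
    "gold_snippet_index": 11,   # presumably the index in `context` of the snippet needed here
    "created_at": "2023-12-20 09:10:42+00:00",
    "level": "16k",             # one of 9 classes; looks like a context-length bucket
}
```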
repo_name: Angryrou/udao
file_path: udao/optimization/tests/moo/conftest.py
context:
[ { "identifier": "TabularContainer", "path": "udao/data/containers/tabular_container.py", "snippet": "class TabularContainer(BaseContainer):\n \"\"\"Container for tabular data, stored in DataFrame format.\"\"\"\n\n data: pd.DataFrame\n\n def get(self, key: str) -> np.ndarray:\n return self.data.loc[key].values # type: ignore" }, { "identifier": "TabularFeatureExtractor", "path": "udao/data/extractors/tabular_extractor.py", "snippet": "class TabularFeatureExtractor(StaticExtractor[TabularContainer]):\n \"\"\"\n Extract columns from a DataFrame as a TabularContainer.\n\n Parameters\n ----------\n columns : Union[List[str], Dict[str, Optional[VarTypes]]], optional\n Either:\n - a list of column names to extract from the DataFrame\n - a dictionary that maps column names to variable types\n if the variable type is None, the column is extracted\n without casting\n - None, in which case all columns are extracted\n\n \"\"\"\n\n def __init__(\n self, columns: Optional[Union[List[str], Dict[str, Optional[VarTypes]]]] = None\n ) -> None:\n self.columns = columns\n\n def extract_features(self, df: pd.DataFrame) -> TabularContainer:\n \"\"\"extract and cast features from the DataFrame\"\"\"\n if isinstance(self.columns, list):\n extracted_df = df[self.columns]\n elif isinstance(self.columns, dict):\n extracted_df = pd.DataFrame()\n for col, var_type in self.columns.items():\n if isinstance(var_type, VarTypes):\n extracted_df[col] = df[col].astype(var_type.value)\n elif var_type is None:\n extracted_df[col] = df[col]\n else:\n raise Exception(f\"Unknown variable type: {var_type}\")\n else:\n extracted_df = df\n\n return TabularContainer(extracted_df)" }, { "identifier": "DataProcessor", "path": "udao/data/handler/data_processor.py", "snippet": "class DataProcessor(Generic[IT]):\n \"\"\"\n Parameters\n ----------\n iterator_cls: Type[BaseDatasetIterator]\n Dataset iterator class type.\n\n feature_extractors: Mapping[str, Tuple[FeatureExtractorType, Any]]\n Dict that links a feature name to tuples of the form (Extractor, args)\n where Extractor implements FeatureExtractor and args are the arguments\n to be passed at initialization.\n N.B.: Feature names must match the iterator's parameters.\n\n If Extractor is a StaticExtractor, the features are extracted\n independently of the split.\n\n If Extractor is a TrainedExtractor, the extractor is first fitted\n on the train split and then applied to the other splits.\n\n feature_preprocessors: Optional[Mapping[str, List[FeaturePreprocessor]]]\n Dict that links a feature name to a list of tuples of the form (Processor, args)\n where Processor implements FeatureProcessor and args are the arguments\n to be passed at initialization.\n This allows to apply a series of processors to different features, e.g.\n to normalize the features.\n N.B.: Feature names must match the iterator's parameters.\n If Processor is a StaticExtractor, the features are processed\n independently of the split.\n\n If Extractor is a TrainedExtractor, the processor is first fitted\n on the train split and then applied to the other splits\n (typically for normalization).\n\n tensors_dtype: Optional[th.dtype]\n Data type of the tensors returned by the iterator, by default None\n \"\"\"\n\n def __init__(\n self,\n iterator_cls: Type[IT],\n feature_extractors: Dict[str, FeatureExtractor],\n feature_preprocessors: Optional[\n Mapping[\n str,\n Sequence[FeaturePreprocessor],\n ]\n ] = None,\n tensors_dtype: Optional[th.dtype] = None,\n ) -> None:\n self.iterator_cls = iterator_cls\n 
self.feature_extractors = feature_extractors\n self.feature_processors = feature_preprocessors or {}\n\n def _apply_processing_function(\n self,\n function: Callable[..., BaseContainer],\n data: Union[DataFrame, BaseContainer],\n split: DatasetType,\n is_trained: bool,\n ) -> BaseContainer:\n if is_trained:\n features = function(data, split=split)\n else:\n features = function(data)\n\n return features\n\n def extract_features(\n self, data: DataFrame, split: DatasetType\n ) -> Dict[str, BaseContainer]:\n \"\"\"Extract features for the different splits of the data.\n\n Returns\n -------\n DataHandler\n self\n\n Raises\n ------\n ValueError\n Expects data to be split before extracting features.\n \"\"\"\n features: Dict[str, BaseContainer] = {}\n for name, extractor in self.feature_extractors.items():\n features[name] = self._apply_processing_function(\n extractor.extract_features,\n data,\n split=split,\n is_trained=extractor.trained,\n )\n for preprocessor in self.feature_processors.get(name, []):\n features[name] = self._apply_processing_function(\n preprocessor.preprocess,\n features[name],\n split=split,\n is_trained=preprocessor.trained,\n )\n\n return features\n\n def make_iterator(self, data: DataFrame, keys: Sequence, split: DatasetType) -> IT:\n return self.iterator_cls(keys, **self.extract_features(data, split=split))\n\n def inverse_transform(\n self, container: TabularContainer, pipeline_name: str\n ) -> DataFrame:\n \"\"\"Inverse transform the data to the original format.\n\n Parameters\n ----------\n container: TabularContainer\n Data to be inverse transformed.\n pipeline_name: str\n Name of the feature pipeline to be inverse transformed.\n Returns\n -------\n DataFrame\n Inverse transformed data.\n \"\"\"\n\n extractor = self.feature_extractors[pipeline_name]\n if not isinstance(extractor, TabularFeatureExtractor):\n raise ValueError(\n \"Only TabularFeatureExtractor supports\"\n \"transforming back to original dataframe.\"\n )\n preprocessors = self.feature_processors.get(pipeline_name, [])\n\n for preprocessor in preprocessors[::-1]:\n if not hasattr(preprocessor, \"inverse_transform\"):\n raise ValueError(\n f\"Feature preprocessor {pipeline_name} does \"\n \"not have an inverse transform method.\"\n )\n container = preprocessor.inverse_transform(container) # type: ignore\n df = cast(TabularContainer, container).data\n return df" }, { "identifier": "StaticPreprocessor", "path": "udao/data/preprocessors/base_preprocessor.py", "snippet": "class StaticPreprocessor(ABC, Generic[T]):\n \"\"\"Base class for feature processors that do not require training.\"\"\"\n\n trained: bool = False\n\n def __init__(self) -> None:\n pass\n\n @abstractmethod\n def preprocess(self, container: T) -> T:\n pass" }, { "identifier": "DummyUdaoIterator", "path": "udao/data/tests/iterators/dummy_udao_iterator.py", "snippet": "class DummyUdaoIterator(UdaoIterator[UdaoInput, UdaoItemShape]):\n def __init__(\n self,\n keys: Sequence[str],\n tabular_features: TabularContainer,\n objectives: TabularContainer,\n ) -> None:\n super().__init__(keys, tabular_features=tabular_features, objectives=objectives)\n\n def _getitem(self, idx: int) -> Tuple[UdaoInput, th.Tensor]:\n key = self.keys[idx]\n return (\n UdaoInput(\n th.tensor(self.tabular_features.get(key), dtype=self.tensors_dtype)\n ),\n th.tensor(self.objectives.get(key), dtype=self.tensors_dtype),\n )\n\n @property\n def shape(self) -> UdaoItemShape:\n return UdaoItemShape(\n feature_names=list(self.tabular_features.data.columns),\n 
output_names=list(self.objectives.data.columns),\n )\n\n @staticmethod\n def collate(\n items: Sequence[Tuple[UdaoInput, th.Tensor]]\n ) -> Tuple[UdaoInput, th.Tensor]:\n features = UdaoInput(th.vstack([item[0].features for item in items]))\n objectives = th.vstack([item[1] for item in items])\n return features, objectives" }, { "identifier": "UdaoEmbedInput", "path": "udao/utils/interfaces.py", "snippet": "class UdaoEmbedInput(Generic[T], UdaoInput):\n embedding_input: T\n\n def to(self, device: th.device) -> \"UdaoEmbedInput\":\n if hasattr(self.embedding_input, \"to\"):\n return UdaoEmbedInput(\n self.embedding_input.to(device), self.features.to(device) # type: ignore\n )\n else:\n return UdaoEmbedInput(\n self.embedding_input, self.features.to(device) # type: ignore\n )" }, { "identifier": "Constraint", "path": "udao/optimization/concepts/constraint.py", "snippet": "class Constraint:\n \"\"\"An optimization element is either an objective or a constraint.\n\n The choice of the type depends on whether a DataProcessor is specified\n for the problem:\n - if no DataProcessor is provided: UdaoFunction, it is a callable\n that takes input_variables and input_parameters\n - else, th.nn.Module or other Callable returning a tensor.\n\n Parameters\n ----------\n function : Union[UdaoFunction, th.nn.Module, Callable[..., th.Tensor]]\n Objective function, either a UdaoFunction\n or a th.nn.Module if a DataProcessor is provided\n lower : Optional[float], optional\n lower bound of the element, by default None\n upper : Optional[float], optional\n upper bound of the element, by default None\n \"\"\"\n\n def __init__(\n self,\n function: Union[UdaoFunction, th.nn.Module, Callable[..., th.Tensor]],\n lower: Optional[float] = None,\n upper: Optional[float] = None,\n ) -> None:\n if isinstance(function, th.nn.Module):\n function.eval()\n for p in function.parameters():\n p.requires_grad = False\n self.function = function\n self.lower = lower\n self.upper = upper\n\n def __call__(self, *args: Any, **kwargs: Any) -> th.Tensor:\n return self.function(*args, **kwargs)\n\n def to(self, device: Optional[th.device]) -> \"Constraint\":\n if isinstance(self.function, th.nn.Module) and device is not None:\n self.function.to(device)\n return self\n\n def __repr__(self) -> str:\n return f\"Constraint(lower={self.lower}, upper={self.upper})\"" }, { "identifier": "Objective", "path": "udao/optimization/concepts/objective.py", "snippet": "class Objective(Constraint):\n \"\"\"\n\n Parameters\n ----------\n name : str\n Name of the objective.\n minimize : bool\n Direction of the objective: if True, minimize, else maximize.\n type: VarTypes\n Type of the objective, by default VarTypes.FLOAT\n \"\"\"\n\n def __init__(\n self,\n name: str,\n minimize: bool,\n function: Union[UdaoFunction, th.nn.Module, Callable[..., th.Tensor]],\n lower: Optional[float] = None,\n upper: Optional[float] = None,\n type: VarTypes = VarTypes.FLOAT,\n ):\n super().__init__(function=function, lower=lower, upper=upper)\n self.name = name\n self.minimize = minimize\n self.type = type\n\n @property\n def direction(self) -> int:\n \"\"\"Get gradient direction from optimization type\"\"\"\n if self.minimize:\n return 1\n else:\n return -1\n\n def __repr__(self) -> str:\n return (\n f\"Objective(name={self.name}, \"\n f\"direction={'min' if self.minimize else 'max'}, \"\n f\"lower={self.lower}, upper={self.upper})\"\n )" }, { "identifier": "BoolVariable", "path": "udao/optimization/concepts/variable.py", "snippet": "class 
BoolVariable(IntegerVariable):\n \"\"\"Boolean variable.\"\"\"\n\n lower: int = field(default=0, init=False)\n upper: int = field(default=1, init=False)" }, { "identifier": "FloatVariable", "path": "udao/optimization/concepts/variable.py", "snippet": "class FloatVariable(NumericVariable):\n \"\"\"Numeric variable with float values.\"\"\"\n\n lower: float\n upper: float" }, { "identifier": "IntegerVariable", "path": "udao/optimization/concepts/variable.py", "snippet": "class IntegerVariable(NumericVariable):\n \"\"\"Numeric variable with integer values.\"\"\"\n\n lower: int\n upper: int" }, { "identifier": "Variable", "path": "udao/optimization/concepts/variable.py", "snippet": "class Variable:\n \"\"\"Variable to optimize.\"\"\"\n\n pass" }, { "identifier": "MOProblem", "path": "udao/optimization/concepts/problem.py", "snippet": "class MOProblem(BaseProblem):\n \"\"\"Multi-objective optimization problem.\"\"\"\n\n def __init__(\n self,\n objectives: Sequence[Objective],\n variables: Dict[str, Variable],\n constraints: Sequence[Constraint],\n data_processor: Optional[DataProcessor] = None,\n input_parameters: Optional[Dict[str, Any]] = None,\n ) -> None:\n self.objectives = objectives\n super().__init__(\n variables,\n constraints,\n data_processor=data_processor,\n input_parameters=input_parameters,\n )\n\n def __repr__(self) -> str:\n return (\n f\"MOProblem(objectives={self.objectives}, \"\n f\"variables={self.variables}, \"\n f\"constraints={self.constraints}, \"\n f\"input_parameters={self.input_parameters})\"\n )" }, { "identifier": "InputParameters", "path": "udao/optimization/concepts/utils.py", "snippet": "class UdaoFunction(Protocol):\nclass ModelComponent:\nclass InaccurateModel(th.nn.Module):\n def __call__(\n self,\n input_variables: InputVariables,\n input_parameters: InputParameters = None,\n ) -> th.Tensor:\n def __init__(self, data_processor: DataProcessor, model: th.nn.Module) -> None:\n def process_data(\n self,\n input_variables: InputVariables,\n input_parameters: InputParameters = None,\n ) -> Tuple[Any, BaseIterator]:\n def __call__(\n self,\n input_variables: InputVariables,\n input_parameters: InputParameters = None,\n ) -> th.Tensor:\n def to(self, device: th.device) -> None:\n def __init__(\n self, model: th.nn.Module, std_func: th.nn.Module, alpha: float\n ) -> None:\n def forward(self, x: th.Tensor) -> th.Tensor:\ndef derive_unprocessed_input(\n input_variables: InputVariables,\n input_parameters: InputParameters = None,\n device: Optional[th.device] = None,\n) -> Tuple[Dict[str, th.Tensor], Dict[str, Any]]:\ndef derive_processed_input(\n data_processor: DataProcessor[UdaoIterator],\n input_variables: InputVariables,\n input_parameters: InputParameters = None,\n device: Optional[th.device] = None,\n) -> Tuple[UdaoInput, UdaoIterator]:" }, { "identifier": "MOGD", "path": "udao/optimization/soo/mogd.py", "snippet": "class MOGD(SOSolver):\n \"\"\"MOGD solver for single-objective optimization.\n\n Performs gradient descent on input variables by minimizing an\n objective loss and a constraint loss.\n \"\"\"\n\n @dataclass\n class Params:\n learning_rate: float\n \"\"\"learning rate of Adam optimizer applied to input variables\"\"\"\n max_iters: int\n \"\"\"maximum number of iterations for a single local search\"\"\"\n patience: int\n \"\"\"maximum number of iterations without improvement\"\"\"\n multistart: int\n \"\"\"number of random starts for gradient descent\"\"\"\n objective_stress: float = 10.0\n \"\"\"stress term for objective functions\"\"\"\n 
constraint_stress: float = 1e5\n \"\"\"stress term for constraint functions\"\"\"\n strict_rounding: bool = False\n \"\"\"whether strictly rounding integer variables at each iteration. \"\"\"\n batch_size: int = 1\n \"\"\"batch size for gradient descent\"\"\"\n device: Optional[th.device] = field(default_factory=get_default_device)\n \"\"\"device on which to perform torch operations, by default available device.\"\"\"\n dtype: th.dtype = th.float32\n \"\"\"type of the tensors\"\"\"\n\n def __init__(self, params: Params) -> None:\n super().__init__()\n self.lr = params.learning_rate\n self.max_iter = params.max_iters\n self.patience = params.patience\n self.multistart = params.multistart\n self.objective_stress = params.objective_stress\n self.constraint_stress = params.constraint_stress\n self.strict_rounding = params.strict_rounding\n self.batch_size = params.batch_size\n self.device = params.device\n self.dtype = params.dtype\n\n def _get_unprocessed_input_values(\n self,\n numeric_variables: Dict[str, co.NumericVariable],\n input_parameters: Optional[Dict[str, Any]] = None,\n seed: Optional[int] = None,\n ) -> Tuple[Dict[str, th.Tensor], Dict[str, Any]]:\n \"\"\"\n\n Parameters\n ----------\n numeric_variables : Dict[str, co.NumericVariable]\n Numeric variables for which to get random values\n input_parameters : Optional[Dict[str, Any]], optional\n Non decision parts of the input, by default None\n seed : Optional[int], optional\n Random seed, by default None\n\n Returns\n -------\n Tuple[Dict[str, th.Tensor], Dict[str, Any]]\n - random values as a tensor for each numeric variable\n - input parameters valuies\n \"\"\"\n numeric_values: Dict[str, np.ndarray] = {}\n\n for i, (name, variable) in enumerate(numeric_variables.items()):\n numeric_values[name] = co.variable.get_random_variable_values(\n variable, self.batch_size, seed=seed + i if seed is not None else None\n )\n return derive_unprocessed_input(\n input_variables=numeric_values,\n input_parameters=input_parameters,\n device=self.device,\n )\n\n def _get_processed_input_values(\n self,\n numeric_variables: Dict[str, co.NumericVariable],\n data_processor: DataProcessor,\n input_parameters: Optional[Dict[str, Any]] = None,\n seed: Optional[int] = None,\n ) -> Tuple[UdaoInput, UdaoItemShape, Callable[[th.Tensor], TabularContainer]]:\n \"\"\"Get random values for numeric variables\n\n Parameters\n ----------\n numeric_variables : Dict[str, co.NumericVariable]\n Numeric variables on which to apply gradients\n data_processor : DataProcessor\n Data processor to process input variables\n input_parameters : Optional[Dict[str, Any]], optional\n Non decision parts of the input, by default None\n\n Returns\n -------\n Tuple[UdaoInput, UdaoInputShape, Callable[[th.Tensor], TabularContainer]]\n - random values for numeric variables\n - shape of the input\n - function to convert a tensor to a TabularContainer\n \"\"\"\n numeric_values: Dict[str, np.ndarray] = {}\n\n for i, (name, variable) in enumerate(numeric_variables.items()):\n numeric_values[name] = co.variable.get_random_variable_values(\n variable, self.batch_size, seed=seed + i if seed is not None else None\n )\n input_data, iterator = derive_processed_input(\n data_processor=data_processor,\n input_parameters=input_parameters or {},\n input_variables=numeric_values,\n device=self.device,\n )\n make_tabular_container = cast(\n UdaoIterator, iterator\n ).get_tabular_features_container\n\n input_data_shape = iterator.shape\n\n return (\n input_data,\n input_data_shape,\n 
make_tabular_container,\n )\n\n def _get_unprocessed_input_bounds(\n self,\n numeric_variables: Dict[str, co.NumericVariable],\n ) -> Tuple[Dict[str, float], Dict[str, float]]:\n \"\"\"\n\n Parameters\n ----------\n numeric_variables : Dict[str, co.NumericVariable]\n Variables for which to get bounds\n\n Returns\n -------\n Tuple[Dict[str, float], Dict[str, float]]\n - lower bounds of numeric variables\n - upper bounds of numeric variables\n \"\"\"\n lower_numeric_values = {\n name: variable.lower for name, variable in numeric_variables.items()\n }\n upper_numeric_values = {\n name: variable.upper for name, variable in numeric_variables.items()\n }\n return lower_numeric_values, upper_numeric_values\n\n def _get_processed_input_bounds(\n self,\n numeric_variables: Dict[str, co.NumericVariable],\n data_processor: DataProcessor,\n input_parameters: Optional[Dict[str, Any]] = None,\n ) -> Tuple[UdaoInput, UdaoInput]:\n \"\"\"Get bounds of numeric variables\n\n Parameters\n ----------\n numeric_variables : Dict[str, co.NumericVariable]\n Numeric variables on which to apply gradients\n data_processor : DataProcessor\n Data processor to process input variables\n input_parameters : Optional[Dict[str, Any]], optional\n Input parameters, by default None\n\n Returns\n -------\n Tuple[UdaoInput, UdaoInput]\n Lower and upper bounds of numeric\n variables in the form of a UdaoInput\n \"\"\"\n lower_numeric_values = {\n name: variable.lower for name, variable in numeric_variables.items()\n }\n upper_numeric_values = {\n name: variable.upper for name, variable in numeric_variables.items()\n }\n lower_input, _ = derive_processed_input(\n data_processor=data_processor,\n input_parameters=input_parameters,\n input_variables=lower_numeric_values,\n )\n upper_input, _ = derive_processed_input(\n data_processor=data_processor,\n input_parameters=input_parameters,\n input_variables=upper_numeric_values,\n )\n if self.device:\n return lower_input.to(self.device), upper_input.to(self.device)\n else:\n return lower_input, upper_input\n\n def _gradient_descent(\n self,\n problem: co.SOProblem,\n input_data: Union[UdaoInput, Dict],\n optimizer: th.optim.Optimizer,\n ) -> Tuple[int, float, float]:\n \"\"\"Perform a gradient descent step on input variables\n\n Parameters\n ----------\n problem : co.SOProblem\n Single-objective optimization problem\n input_data : Union[UdaoInput, Dict]\n Input data - can have different types depending on whether\n the input variables are processed or not.\n - UdaoInput: the naive input\n - Dict: {\"input_variables\": ..., \"input_parameters\": ...}\n\n optimizer : th.optim.Optimizer\n PyTorch optimizer\n\n Returns\n -------\n Tuple[int, float, float]\n - index of minimum loss\n - minimum loss\n - objective value at minimum loss\n\n Raises\n ------\n UncompliantSolutionError\n If no solution within bounds is found\n \"\"\"\n # Compute objective, constraints and corresponding losses\n\n loss_meta = self._compute_loss(problem, input_data)\n sum_loss = loss_meta[\"sum_loss\"]\n min_loss = loss_meta[\"min_loss\"]\n min_loss_id = loss_meta[\"min_loss_id\"]\n best_obj = loss_meta[\"best_obj\"]\n is_within_constraint = loss_meta[\"is_within_constraint\"]\n\n optimizer.zero_grad()\n sum_loss.backward() # type: ignore\n optimizer.step()\n\n if is_within_constraint and (\n self.within_objective_bounds(best_obj, problem.objective)\n ):\n return min_loss_id, min_loss, best_obj\n else:\n raise UncompliantSolutionError(\"No solution within bounds found!\")\n\n def _log_success(\n self,\n problem: 
co.SOProblem,\n iter: int,\n best_obj: float,\n best_iter: int,\n best_feature_input: Any,\n ) -> None:\n logger.debug(\n f\"Finished at iteration {iter}, best local {problem.objective.name} \"\n f\"found {best_obj:.5f}\"\n f\" \\nat iteration {best_iter},\"\n f\" \\nwith vars: {best_feature_input}, for \"\n f\"objective {problem.objective} and constraints {problem.constraints}\"\n )\n\n def _log_failure(\n self,\n problem: co.SOProblem,\n iter: int,\n ) -> None:\n logger.debug(\n f\"Finished at iteration {iter}, no valid {problem.objective.name}\"\n f\" found for input parameters {problem.input_parameters} with \"\n f\"objective {problem.objective} and constraints {problem.constraints}\"\n )\n\n def _unprocessed_single_start_opt(\n self,\n problem: co.SOProblem,\n seed: Optional[int] = None,\n ) -> Tuple[float, Dict[str, float], float]:\n \"\"\"Perform a single start optimization, in the case where\n no data processor is defined.\n The input variables are transformed to a dictionary of tensors and are\n optimized directly, by being passed to the objective function along\n with the input parameters.\n \"\"\"\n best_iter: Optional[int] = None\n best_loss = np.inf\n best_obj: Optional[float] = None\n best_feature_input: Optional[Dict[str, th.Tensor]] = None\n\n (\n input_variable_values,\n input_parameter_values,\n ) = self._get_unprocessed_input_values(\n cast(Dict[str, co.NumericVariable], problem.variables),\n input_parameters=problem.input_parameters,\n seed=seed,\n )\n lower_input, upper_input = self._get_unprocessed_input_bounds(\n cast(Dict[str, co.NumericVariable], problem.variables)\n )\n for name in input_variable_values:\n input_variable_values[name].requires_grad_(True)\n optimizer = optim.Adam([t for t in input_variable_values.values()], lr=self.lr)\n i = 0\n while i < self.max_iter:\n with th.no_grad():\n input_variable_values_backup = {\n k: v.detach().clone() for k, v in input_variable_values.items()\n }\n try:\n min_loss_id, min_loss, local_best_obj = self._gradient_descent(\n problem,\n {\n \"input_variables\": input_variable_values,\n \"input_parameters\": input_parameter_values,\n },\n optimizer=optimizer,\n )\n except UncompliantSolutionError:\n pass\n else:\n if min_loss < best_loss:\n best_loss = min_loss\n best_obj = local_best_obj\n best_feature_input = {\n k: v[min_loss_id].reshape(1, -1)\n for k, v in input_variable_values_backup.items()\n }\n best_iter = i\n\n with th.no_grad():\n # Update input_variable_values with constrained values\n for k in input_variable_values:\n input_variable_values[k].data = th.clip(\n input_variable_values[k].data,\n lower_input[k],\n upper_input[k],\n )\n\n if self.strict_rounding:\n # Round all integer variables at each iteration\n for k in input_variable_values:\n if isinstance(problem.variables[k], co.IntegerVariable):\n input_variable_values[k].data = input_variable_values[\n k\n ].data.round()\n\n if best_iter is not None and i > best_iter + self.patience:\n break\n i += 1\n\n if best_iter is None or best_obj is None or best_feature_input is None:\n self._log_failure(problem, i)\n raise NoSolutionError\n\n if not self.strict_rounding:\n for k in best_feature_input:\n if isinstance(problem.variables[k], co.IntegerVariable):\n best_feature_input[k].data = best_feature_input[k].data.round()\n loss_meta = self._compute_loss(\n problem,\n {\n \"input_variables\": best_feature_input,\n \"input_parameters\": input_parameter_values,\n },\n )\n best_loss = loss_meta[\"min_loss\"]\n best_obj = loss_meta[\"best_obj\"]\n is_within_constraint 
= loss_meta[\"is_within_constraint\"]\n if (\n best_obj is None\n or not is_within_constraint\n or not self.within_objective_bounds(best_obj, problem.objective)\n ):\n self._log_failure(problem, i)\n raise NoSolutionError\n\n best_raw_vars = {\n name: best_feature_input[name]\n .cpu()\n .numpy()\n .squeeze()\n .tolist() # turn np.ndarray to float\n for name in problem.variables\n }\n self._log_success(problem, i, best_obj, best_iter, best_raw_vars)\n return best_obj, best_raw_vars, best_loss\n\n def _processed_single_start_opt(\n self,\n problem: co.SOProblem,\n seed: Optional[int] = None,\n ) -> Tuple[float, Dict[str, float], float]:\n \"\"\"Perform a single start optimization, in the case where\n a data processor is defined.\n\n input variables and parameters are processed by the data processor.\n Gradient descent is performed on the processed input variables.\n Variables are then inverse transformed to get the raw variables.\n \"\"\"\n if not problem.data_processor:\n raise Exception(\"Data processor is not defined!\")\n best_iter: Optional[int] = None\n best_loss = np.inf\n best_obj: Optional[float] = None\n best_feature_input: Optional[th.Tensor] = None\n # Random numeric variables and their characteristics\n (\n input_data,\n input_data_shape,\n make_tabular_container,\n ) = self._get_processed_input_values(\n cast(Dict[str, co.NumericVariable], problem.variables),\n data_processor=problem.data_processor,\n input_parameters=problem.input_parameters,\n seed=seed,\n )\n # Bounds of numeric variables\n lower_input, upper_input = self._get_processed_input_bounds(\n cast(Dict[str, co.NumericVariable], problem.variables),\n data_processor=problem.data_processor,\n input_parameters=problem.input_parameters,\n )\n # Indices of numeric variables on which to apply gradients\n mask = th.tensor(\n [i in problem.variables for i in input_data_shape.feature_names],\n device=self.device,\n )\n grad_indices = th.nonzero(mask, as_tuple=False).squeeze()\n input_vars_subvector = input_data.features[:, grad_indices].clone().detach()\n input_vars_subvector.requires_grad_(True)\n\n optimizer = optim.Adam([input_vars_subvector], lr=self.lr)\n i = 0\n while i < self.max_iter:\n input_data.features = input_data.features.clone().detach()\n input_data.features[:, grad_indices] = input_vars_subvector\n try:\n min_loss_id, min_loss, local_best_obj = self._gradient_descent(\n problem,\n input_data,\n optimizer=optimizer,\n )\n except UncompliantSolutionError:\n pass\n else:\n if min_loss < best_loss:\n best_loss = min_loss\n best_obj = local_best_obj\n best_feature_input = (\n input_data.features.detach()[min_loss_id].clone().reshape(1, -1)\n )\n best_iter = i\n\n with th.no_grad():\n # Update input_vars_subvector with constrained values\n input_vars_subvector.data = th.clip(\n input_vars_subvector.data,\n # Use .data to avoid gradient tracking during update\n lower_input.features[0, grad_indices],\n upper_input.features[0, grad_indices],\n )\n\n if self.strict_rounding:\n # Round all integer variables at each iteration\n input_data.features[:, grad_indices] = input_vars_subvector.data\n feature_container = make_tabular_container(\n input_data.features.detach()\n )\n best_raw_df = problem.data_processor.inverse_transform(\n feature_container, \"tabular_features\"\n )\n numeric_values: Dict[str, np.ndarray] = {\n name: best_raw_df[[name]].values.round()[:, 0]\n if isinstance(variable, co.IntegerVariable)\n else best_raw_df[[name]].values[:, 0]\n for name, variable in problem.variables.items()\n }\n input_data_raw, 
_ = derive_processed_input(\n data_processor=problem.data_processor,\n input_parameters=problem.input_parameters or {},\n input_variables=numeric_values,\n device=self.device,\n )\n input_vars_subvector.data = input_data_raw.features[:, grad_indices]\n\n if best_iter is not None and i > best_iter + self.patience:\n break\n i += 1\n\n if best_iter is None or best_obj is None or best_feature_input is None:\n self._log_failure(problem, i)\n raise NoSolutionError\n\n with th.no_grad():\n best_feature_input = cast(th.Tensor, best_feature_input)\n feature_container = make_tabular_container(best_feature_input)\n best_raw_df = problem.data_processor.inverse_transform(\n feature_container, \"tabular_features\"\n )\n if not self.strict_rounding:\n best_raw_vars: Dict[str, Any] = {\n name: best_raw_df[[name]].values.round()[:, 0]\n if isinstance(variable, co.IntegerVariable)\n else best_raw_df[[name]].values[:, 0]\n for name, variable in problem.variables.items()\n }\n input_data_best_raw, _ = derive_processed_input(\n data_processor=problem.data_processor,\n input_parameters=problem.input_parameters or {},\n input_variables=best_raw_vars,\n device=self.device,\n )\n loss_meta = self._compute_loss(problem, input_data_best_raw)\n best_loss = loss_meta[\"min_loss\"]\n best_obj = loss_meta[\"best_obj\"]\n is_within_constraint = loss_meta[\"is_within_constraint\"]\n if (\n best_obj is None\n or not is_within_constraint\n or not self.within_objective_bounds(best_obj, problem.objective)\n ):\n self._log_failure(problem, i)\n raise NoSolutionError\n else:\n best_raw_vars = {\n name: best_raw_df[[name]]\n .values.squeeze()\n .tolist() # turn np.ndarray to float\n for name in problem.variables\n }\n self._log_success(problem, i, best_obj, best_iter, best_raw_vars)\n return best_obj, best_raw_vars, best_loss\n\n def _single_start_opt(\n self,\n problem: co.SOProblem,\n seed: Optional[int] = None,\n ) -> Tuple[float, Dict[str, float], float]:\n \"\"\"Perform a single start optimization.\n Categorical variables are fixed to the values in input_parameters.\n (a grid search of categorical variables is performed in solve)\n This is where gradient descent is performed.\n\n Parameters\n ----------\n numeric_variables : Dict[str, co.NumericVariable]\n Numeric variables on which to apply gradients\n objective : co.Objective\n Objective to be optimized\n constraints : Sequence[co.Constraint]\n Constraints to be satisfied\n input_parameters : Optional[Dict[str, Any]], optional\n Non decision parts of the input, by default None\n seed: int, by default None\n random seed\n\n Returns\n -------\n Tuple[float, Dict[str, float], flat]\n - objective value\n - variables\n - best loss value\n\n Raises\n ------\n NoSolutionError\n No valid solution is found\n \"\"\"\n\n if not problem.data_processor:\n return self._unprocessed_single_start_opt(problem, seed=seed)\n else:\n return self._processed_single_start_opt(problem, seed=seed)\n\n def solve(\n self, problem: co.SOProblem, seed: Optional[int] = None\n ) -> Tuple[float, Dict[str, float]]:\n if seed is not None:\n th.manual_seed(seed)\n if self.device:\n for constraint in problem.constraints:\n constraint.to(self.device)\n problem.objective.to(self.device)\n\n categorical_variables = [\n name\n for name, variable in problem.variables.items()\n if isinstance(variable, co.EnumVariable)\n ]\n numeric_variables = {\n name: variable\n for name, variable in problem.variables.items()\n if isinstance(variable, co.NumericVariable)\n }\n\n meshed_categorical_vars = 
self.get_meshed_categorical_vars(problem.variables)\n\n if meshed_categorical_vars is None:\n meshed_categorical_vars = np.array([0])\n\n best_loss_list: List[float] = []\n obj_list: List[float] = []\n vars_list: List[Dict] = []\n for i in range(self.multistart):\n for categorical_cell in meshed_categorical_vars:\n categorical_values = {\n name: categorical_cell[ind]\n for ind, name in enumerate(categorical_variables)\n } # from {id: value} to {name: value}\n fixed_values = {\n **categorical_values,\n **(problem.input_parameters or {}),\n }\n try:\n (\n obj_pred,\n best_raw_vars,\n best_loss,\n ) = self._single_start_opt(\n co.SOProblem(\n variables=numeric_variables, # type: ignore\n input_parameters=fixed_values,\n objective=problem.objective,\n constraints=problem.constraints or [],\n data_processor=problem.data_processor,\n ),\n seed=seed + i if seed is not None else None,\n )\n except NoSolutionError:\n continue\n else:\n best_loss_list.append(best_loss)\n obj_list.append(obj_pred)\n vars_list.append(best_raw_vars)\n if not obj_list:\n raise NoSolutionError(\"No valid solutions and variables found!\")\n\n idx = np.argmin(best_loss_list)\n vars_cand = vars_list[idx]\n if vars_cand is not None:\n obj_cand = obj_list[idx]\n if obj_cand is None:\n raise Exception(f\"Unexpected objs_list[{idx}] is None.\")\n else:\n raise NoSolutionError(\"No valid solutions and variables found!\")\n\n return obj_cand, vars_cand\n\n ##################\n ## _loss ##\n ##################\n def constraints_loss(\n self, constraint_values: List[th.Tensor], constraints: Sequence[co.Constraint]\n ) -> th.Tensor:\n \"\"\"\n compute loss of the values of each constraint function fixme: double-check\n\n Parameters\n ----------\n constraint_values : List[th.Tensor]\n values of each constraint function\n constraints : Sequence[co.Constraint]\n constraint functions\n\n Returns\n -------\n th.Tensor\n loss of the values of each constraint function\n\n \"\"\"\n\n # vars: a tensor\n # get loss for constraint functions defined in the problem setting\n total_loss = th.zeros_like(\n constraint_values[0], device=self.device, dtype=self.dtype\n )\n for i, (constraint_value, constraint) in enumerate(\n zip(constraint_values, constraints)\n ):\n stress = (\n self.objective_stress\n if isinstance(constraint, co.Objective)\n else self.constraint_stress\n )\n constraint_violation = th.zeros_like(\n constraint_values[0], device=self.device, dtype=self.dtype\n )\n if constraint.upper is not None and constraint.lower is not None:\n if constraint.upper == constraint.lower:\n constraint_violation = th.abs(constraint_value - constraint.upper)\n else:\n normed_constraint = (constraint_value - constraint.lower) / (\n constraint.upper - constraint.lower\n )\n constraint_violation = th.where(\n (normed_constraint < 0) | (normed_constraint > 1),\n (normed_constraint - 0.5),\n 0,\n )\n elif constraint.lower is not None:\n constraint_violation = th.relu(constraint.lower - constraint_value)\n elif constraint.upper is not None:\n constraint_violation = th.relu(constraint_value - constraint.upper)\n total_loss += (\n constraint_violation**2 + stress * (constraint_violation > 0).float()\n )\n\n return total_loss\n\n def objective_loss(\n self, objective_value: th.Tensor, objective: co.Objective\n ) -> th.Tensor:\n \"\"\"Compute the objective loss for a given objective value:\n - if no bounds are specified, use the squared objective value\n - if both bounds are specified, use the squared normalized\n objective value if it is within the bounds, 
otherwise\n add a stress term to a squared distance to middle of the bounds\n\n Parameters\n ----------\n objective_value : th.Tensor\n Tensor of objective values\n objective : co.Objective\n Objective function\n\n Returns\n -------\n th.Tensor\n Tensor of objective losses\n\n Raises\n ------\n NotImplementedError\n If only one bound is specified for the objective\n\n \"\"\"\n\n if objective.upper is None and objective.lower is None:\n loss = (\n th.sign(objective_value) * (objective_value**2) * objective.direction\n )\n elif objective.upper is not None and objective.lower is not None:\n norm_cst_obj_pred = (objective_value - objective.lower) / (\n objective.upper - objective.lower\n ) # scaled\n loss = th.where(\n (norm_cst_obj_pred < 0) | (norm_cst_obj_pred > 1),\n (norm_cst_obj_pred - 0.5) ** 2 + self.objective_stress,\n norm_cst_obj_pred * objective.direction,\n )\n else:\n raise NotImplementedError(\"Objective with only one bound is not supported\")\n return loss\n\n def _obj_forward(\n self,\n optimization_element: co.Constraint,\n input_data: Union[UdaoInput, Dict],\n ) -> th.Tensor:\n if isinstance(input_data, UdaoInput):\n return optimization_element.function(input_data) # type: ignore\n else:\n # Dict when unprocessed inputs\n return optimization_element.function(**input_data)\n\n def _compute_loss(\n self, problem: co.SOProblem, input_data: Union[UdaoInput, Dict]\n ) -> Dict[str, Any]:\n obj_output = self._obj_forward(problem.objective, input_data)\n objective_loss = self.objective_loss(obj_output, problem.objective)\n constraint_loss = th.zeros_like(objective_loss, device=self.device)\n\n if problem.constraints:\n const_outputs = [\n self._obj_forward(constraint, input_data)\n for constraint in problem.constraints\n ]\n constraint_loss = self.constraints_loss(const_outputs, problem.constraints)\n\n loss = objective_loss + constraint_loss\n min_loss_id = int(th.argmin(loss).cpu().item())\n\n return {\n \"sum_loss\": th.sum(loss),\n \"min_loss\": th.min(loss).cpu().item(),\n \"min_loss_id\": min_loss_id,\n \"best_obj\": obj_output[min_loss_id].cpu().item(),\n \"is_within_constraint\": bool((constraint_loss[min_loss_id] == 0).item()),\n }\n\n ##################\n ## _get (vars) ##\n ##################\n\n def get_meshed_categorical_vars(\n self, variables: Dict[str, co.Variable]\n ) -> Optional[np.ndarray]:\n \"\"\"\n Get combinations of all categorical (binary, enum) variables\n\n Parameters\n ----------\n variables : Dict[str, co.Variable]\n Variables to be optimized\n\n Returns\n -------\n Optional[np.ndarray]\n Combinations of all categorical variables\n of shape (n_samples, n_vars)\n \"\"\"\n cv_value_list = [\n variable.values\n for variable in variables.values()\n if isinstance(variable, co.EnumVariable)\n ]\n if not cv_value_list:\n return None\n meshed_cv_value_list = [x_.reshape(-1, 1) for x_ in np.meshgrid(*cv_value_list)]\n meshed_cv_value = np.concatenate(meshed_cv_value_list, axis=1)\n return meshed_cv_value\n\n ##################\n ## _check ##\n ##################\n\n @staticmethod\n def within_objective_bounds(obj_value: float, objective: co.Objective) -> bool:\n \"\"\"\n check whether violating the objective value var_ranges\n :param pred_dict: dict, keys are objective names,\n values are objective values\n :param obj_bounds: dict, keys are objective names,\n values are lower and upper var_ranges of each objective value\n :return: True or False\n \"\"\"\n within_bounds = True\n if objective.upper is not None:\n within_bounds = obj_value <= objective.upper\n if 
objective.lower is not None:\n within_bounds = within_bounds and obj_value >= objective.lower\n return within_bounds" } ]
import_statement:
from typing import Dict, Sequence
from torch import nn
from ....data.containers.tabular_container import TabularContainer
from ....data.extractors.tabular_extractor import TabularFeatureExtractor
from ....data.handler.data_processor import DataProcessor
from ....data.preprocessors.base_preprocessor import StaticPreprocessor
from ....data.tests.iterators.dummy_udao_iterator import DummyUdaoIterator
from ....utils.interfaces import UdaoEmbedInput
from ...concepts import ( BoolVariable, Constraint, FloatVariable, IntegerVariable, Objective, Variable, )
from ...concepts.problem import MOProblem
from ...concepts.utils import InputParameters, InputVariables
from ...soo.mogd import MOGD
import pytest
import torch as th
token_num: 11,361
cropped_code and all_code (the next two lines):
class ObjModel1(nn.Module): def forward(self, x: UdaoEmbedInput) -> th.Tensor: return th.reshape(x.features[:, 0] ** 2, (-1, 1)) class ObjModel2(nn.Module): def forward(self, x: UdaoEmbedInput) -> th.Tensor: return th.reshape(x.features[:, 1] ** 2, (-1, 1)) class ComplexObj1(nn.Module): def forward(self, x: UdaoEmbedInput) -> th.Tensor: return th.reshape(x.features[:, 0] ** 2 - x.features[:, 1] ** 2, (-1, 1)) class ComplexObj2(nn.Module): def forward(self, x: UdaoEmbedInput) -> th.Tensor: return th.reshape(x.features[:, 0] ** 2 + x.features[:, 1] ** 2, (-1, 1)) class TabularFeaturePreprocessor(StaticPreprocessor): def preprocess(self, tabular_feature: TabularContainer) -> TabularContainer: tabular_feature.data.loc[:, "v1"] = tabular_feature.data["v1"] / 1 tabular_feature.data.loc[:, "v2"] = (tabular_feature.data["v2"] - 1) / 6 return tabular_feature def inverse_transform(self, tabular_feature: TabularContainer) -> TabularContainer: tabular_feature.data.loc[:, "v1"] = tabular_feature.data["v1"] * 1 tabular_feature.data.loc[:, "v2"] = tabular_feature.data["v2"] * 6 + 1 return tabular_feature @pytest.fixture() def data_processor() -> DataProcessor: return DataProcessor( iterator_cls=DummyUdaoIterator, feature_extractors={ "tabular_features": TabularFeatureExtractor( columns=["v1", "v2"], ), "objectives": TabularFeatureExtractor(columns=["objective_input"]), }, feature_preprocessors={"tabular_features": [TabularFeaturePreprocessor()]}, ) @pytest.fixture def mogd() -> MOGD: return MOGD( MOGD.Params( learning_rate=0.1, max_iters=100, patience=10, multistart=2, objective_stress=10, constraint_stress=1e5, device=th.device("cpu"), ) ) @pytest.fixture def two_obj_problem(data_processor: DataProcessor) -> MOProblem: objectives = [ Objective("obj1", minimize=True, function=ObjModel1()), Objective("obj2", minimize=True, function=ObjModel2()), ]
class ObjModel1(nn.Module): def forward(self, x: UdaoEmbedInput) -> th.Tensor: return th.reshape(x.features[:, 0] ** 2, (-1, 1)) class ObjModel2(nn.Module): def forward(self, x: UdaoEmbedInput) -> th.Tensor: return th.reshape(x.features[:, 1] ** 2, (-1, 1)) class ComplexObj1(nn.Module): def forward(self, x: UdaoEmbedInput) -> th.Tensor: return th.reshape(x.features[:, 0] ** 2 - x.features[:, 1] ** 2, (-1, 1)) class ComplexObj2(nn.Module): def forward(self, x: UdaoEmbedInput) -> th.Tensor: return th.reshape(x.features[:, 0] ** 2 + x.features[:, 1] ** 2, (-1, 1)) class TabularFeaturePreprocessor(StaticPreprocessor): def preprocess(self, tabular_feature: TabularContainer) -> TabularContainer: tabular_feature.data.loc[:, "v1"] = tabular_feature.data["v1"] / 1 tabular_feature.data.loc[:, "v2"] = (tabular_feature.data["v2"] - 1) / 6 return tabular_feature def inverse_transform(self, tabular_feature: TabularContainer) -> TabularContainer: tabular_feature.data.loc[:, "v1"] = tabular_feature.data["v1"] * 1 tabular_feature.data.loc[:, "v2"] = tabular_feature.data["v2"] * 6 + 1 return tabular_feature @pytest.fixture() def data_processor() -> DataProcessor: return DataProcessor( iterator_cls=DummyUdaoIterator, feature_extractors={ "tabular_features": TabularFeatureExtractor( columns=["v1", "v2"], ), "objectives": TabularFeatureExtractor(columns=["objective_input"]), }, feature_preprocessors={"tabular_features": [TabularFeaturePreprocessor()]}, ) @pytest.fixture def mogd() -> MOGD: return MOGD( MOGD.Params( learning_rate=0.1, max_iters=100, patience=10, multistart=2, objective_stress=10, constraint_stress=1e5, device=th.device("cpu"), ) ) @pytest.fixture def two_obj_problem(data_processor: DataProcessor) -> MOProblem: objectives = [ Objective("obj1", minimize=True, function=ObjModel1()), Objective("obj2", minimize=True, function=ObjModel2()), ]
next_line: variables: Dict[str, Variable] = {
gold_snippet_index: 11
created_at: 2023-12-20 09:10:42+00:00
level: 16k
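For reference, one plausible way to consume a record like the one above is to build a prompt from the gold context snippet plus the in-file prefix, ask a model for the next line, and check the output against `next_line`. This is only an illustrative sketch inferred from the field names; `complete_next_line` is a hypothetical model call, and a real evaluation would retrieve and rank all `context` entries rather than reading `gold_snippet_index` directly.

```python
from typing import Dict


def build_prompt(record: Dict) -> str:
    """Assemble a completion prompt from one record (illustrative only)."""
    gold = record["context"][record["gold_snippet_index"]]
    # Retrieved cross-file context, tagged with its path, followed by the
    # in-file prefix: the import block and the code preceding the target line.
    retrieved = f"# Path: {gold['path']}\n{gold['snippet']}\n"
    prefix = f"{record['import_statement']}\n{record['cropped_code']}\n"
    return retrieved + prefix


def exact_match(prediction: str, record: Dict) -> bool:
    # Whitespace-insensitive comparison against the ground-truth next line.
    return prediction.strip() == record["next_line"].strip()


# Usage (complete_next_line is a hypothetical model call):
# prompt = build_prompt(record)
# print(exact_match(complete_next_line(prompt), record))
```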
repo_name: XLearning-SCU/2023-TPAMI-SMILE
file_path: Net.py
context:
[ { "identifier": "get_dist_release", "path": "DistComput.py", "snippet": "def get_dist_release(loader, dist_path):\r\n if not os.path.exists(dist_path):\r\n # loader = test_loader\r\n num_data = [10]\r\n with torch.no_grad():\r\n dist_list = [[] for i in range(len(num_data))]\r\n for j, data_t in enumerate(loader, 0):\r\n # get all inputs\r\n fea0, fea1, class_labels0, class_labels1, mask, is_pair, idx = data_t\r\n inputs_t = fea0.cuda()\r\n # inputs_t = torch.cat([fea0,fea1]).cuda()\r\n # labels_t = torch.cat([class_labels0,class_labels1]).cuda()\r\n # inputs_t, _, labels_t, _ = data_t\r\n # inputs_t, labels_t = inputs_t.cuda(), labels_t.cuda()\r\n for i in range(len(inputs_t)):\r\n if i % 1000 == 0:\r\n print(i)\r\n aa = torch.mul(inputs_t - inputs_t[i], inputs_t - inputs_t[i])\r\n # dist = torch.sqrt(torch.sum(aa, dim=(2, 3)))\r\n # dist_m = dist[:, 0]\r\n # print(aa.shape)\r\n dist_m = torch.sqrt(torch.sum(aa, dim=tuple(torch.arange(1, len(aa.shape)))))\r\n dist_m[i] = 1000\r\n sorted_dist = np.sort(dist_m.cpu().numpy())\r\n for jj in range(len(num_data)):\r\n dist_list[jj].append(sorted_dist[num_data[jj]])\r\n inputs_t = fea1.cuda()\r\n for i in range(len(inputs_t)):\r\n if i % 1000 == 0:\r\n print(i)\r\n aa = torch.mul(inputs_t - inputs_t[i], inputs_t - inputs_t[i])\r\n # dist = torch.sqrt(torch.sum(aa, dim=(2, 3)))\r\n # dist_m = dist[:, 0]\r\n # print(aa.shape)\r\n dist_m = torch.sqrt(torch.sum(aa, dim=tuple(torch.arange(1, len(aa.shape)))))\r\n dist_m[i] = 1000\r\n sorted_dist = np.sort(dist_m.cpu().numpy())\r\n for jj in range(len(num_data)):\r\n dist_list[jj].append(sorted_dist[num_data[jj]])\r\n for ii in range(len(num_data)):\r\n DirectoryOperator(dist_path).make_fold()\r\n np.savetxt(dist_path, np.array(dist_list[ii]))\r\n\r\n dist = torch.from_numpy(\r\n np.loadtxt(\r\n dist_path\r\n ).astype(np.float32)\r\n )\r\n return dist\r" }, { "identifier": "get_nearest_k", "path": "_Utils/Calculator.py", "snippet": "def get_nearest_k(h0, h1, k=1, sp_size=1000):\r\n hh0 = h0.half()\r\n hh1 = h1.half()\r\n split = int(np.ceil(len(hh0) / sp_size))\r\n near = []\r\n for i in range(split):\r\n dist = torch.cdist(hh0[i * sp_size:(i + 1) * sp_size], hh1)\r\n nearest = torch.argsort(dist, dim=1)[:, :k]\r\n near.append(nearest)\r\n nearest = torch.cat(near)\r\n return nearest\r" }, { "identifier": "update_log", "path": "_Utils/Logs.py", "snippet": "def update_log(dic, path='../log/res.csv'):\r\n index = 'Epoch'\r\n val = []\r\n name = []\r\n for na, v in dic.items():\r\n val.append(v)\r\n name.append(na)\r\n dt = pd.DataFrame([val], columns=name)\r\n dt = dt.set_index(index)\r\n if os.path.exists(path):\r\n dt_old = pd.read_csv(path, index_col=index)\r\n dt = merge_csv(dt_old, dt)\r\n DirectoryOperator(path).make_fold()\r\n dt.to_csv(path)\r" }, { "identifier": "visualize2", "path": "_Utils/Scatter.py", "snippet": "def visualize2(feature_vec, type_vec, group_vec, pred_vec, prefix, ):\r\n fv = feature_vec.reshape((len(feature_vec), -1))\r\n for perplexity in []:# 50\r\n vis_fea_multi = TSNE(perplexity=perplexity).fit_transform(\r\n np.concatenate((fv[group_vec == 0], fv[group_vec == 1]), axis=1)\r\n )\r\n for s in [5]:\r\n prefix2 = prefix + 'P{}S{}'.format(perplexity, s)\r\n visualize_scatter(vis_fea_multi,\r\n fig_path='{}Multi.svg'.format(prefix2),\r\n label_color=type_vec[group_vec == 0],\r\n # label_shape=type_vec,\r\n s=s\r\n )\r\n\r\n for perplexity in [50]:\r\n vis_fea = TSNE(perplexity=perplexity).fit_transform(fv)\r\n for s in [5]: # 5\r\n prefix2 = prefix + 
'P{}S{}'.format(perplexity, s)\r\n visualize_scatter(vis_fea,\r\n fig_path='{}Type.svg'.format(prefix2),\r\n label_color=type_vec,\r\n # label_shape=type_vec,\r\n s=s\r\n )\r\n # visualize_scatter(vis_fea,\r\n # fig_path='{}Cluster.svg'.format(prefix),\r\n # label_color=pred_vec,\r\n # label_shape=type_vec,\r\n #\r\n # )\r\n visualize_scatter(vis_fea,\r\n fig_path='{}Group.svg'.format(prefix2),\r\n label_color=group_vec,\r\n # label_shape=type_vec,\r\n s=s\r\n )\r" }, { "identifier": "visualize", "path": "_Utils/Visualize.py", "snippet": "def visualize(feature_vec, type_vec, group_vec, pred_vec, prefix='../Visualization/E{:03d}'.format(0)):\r\n vis_fea = tsne(feature_vec)\r\n visualize_scatter(vis_fea,\r\n fig_path='{}Type.jpg'.format(prefix),\r\n label_color=type_vec,\r\n label_shape=type_vec,\r\n )\r\n visualize_scatter(vis_fea,\r\n fig_path='{}Cluster.jpg'.format(prefix),\r\n label_color=pred_vec,\r\n label_shape=type_vec,\r\n )\r\n visualize_scatter(vis_fea,\r\n fig_path='{}Group.jpg'.format(prefix),\r\n label_color=group_vec,\r\n label_shape=type_vec,\r\n )\r" }, { "identifier": "visual_matrix_console", "path": "_Utils/Visualize.py", "snippet": "def visual_matrix_console(x):\r\n if len(x.shape) <= 2:\r\n x = x.reshape((*x.shape, 1))\r\n base_wid = int(np.log10(np.max(x) + 0.5)) + 1\r\n head_wid = x.shape[2] * (1 + base_wid)\r\n head_sep = int(head_wid // 2) + 1\r\n print('t\\\\c ', end='')\r\n for i in range(x.shape[1]):\r\n print(('{:' + '{}'.format(head_sep) + 'd}').format(i), end=' ' * (head_wid - head_sep))\r\n print()\r\n for i, line in enumerate(x):\r\n print('{:2d}: '.format(i), end='')\r\n for cl in line:\r\n sg = True\r\n for g in cl:\r\n if sg:\r\n sg = False\r\n else:\r\n print(' ', end='')\r\n if g != 0:\r\n # print('base_wid == {}'.format(base_wid))\r\n # print('g == {}'.format(g))\r\n print(('{:' + str(base_wid) + 'd}').format(g), end='')\r\n else:\r\n print(' ' * base_wid, end='')\r\n print('|', end='')\r\n print()\r" }, { "identifier": "visualize_image", "path": "_Utils/Visualize.py", "snippet": "def visualize_image(x, verbose=0, show=False, fig_path=None):\r\n \"\"\"\r\n\r\n :param show:\r\n :param fig_path:\r\n :param x:\r\n (row, line, pic_h, pic_w) or (row, line, pic_h, pic_w, pic_c), pic_c = 1,3,4\r\n :return:\r\n \"\"\"\r\n x = np.asarray(x)\r\n if verbose:\r\n print('img.min() == {}'.format(np.min(x)))\r\n print('img.max() == {}'.format(np.max(x)))\r\n x -= np.min(x)\r\n x /= np.max(x)\r\n row, line = x.shape[:2]\r\n w, h = x.shape[1] * x.shape[3] / 90, x.shape[0] * x.shape[2] / 90\r\n plt.figure(figsize=(w, h)) # w, h\r\n count = 0\r\n for rx in x:\r\n for image in rx:\r\n count += 1\r\n plt.subplot(row, line, count)\r\n plt.imshow(image, cmap='gray', )\r\n plt.xticks([])\r\n plt.yticks([])\r\n\r\n plt.subplots_adjust(left=0, right=1, top=1, bottom=0, hspace=0.1 / h, wspace=0.1 / w)\r\n\r\n if not show and fig_path is None:\r\n fig_path = '../_fig/fig.jpg'\r\n if fig_path is not None:\r\n DirectoryOperator.FoldOperator(directory=fig_path).make_fold()\r\n plt.savefig(fig_path, transparent=True)\r\n if show:\r\n plt.show()\r\n plt.close()\r" }, { "identifier": "plot_heat_map", "path": "_Utils/Visualize.py", "snippet": "def plot_heat_map(z, xticks=None, yticks=None, xlabel=None, ylabel=None, title=None, show=False, fig_path=None):\r\n \"\"\"\r\n\r\n :param z: z[i,j] shown in i-th row, j-th line\r\n :param xlabel:\r\n :param ylabel:\r\n :param show:\r\n :param fig_path:\r\n :return:\r\n \"\"\"\r\n left = 0.15\r\n right = 1\r\n top = 0.95\r\n bottom = 0.15\r\n w, h 
= z.shape\r\n plt.figure(figsize=(w / (right - left), h / (top - bottom)))\r\n\r\n # plt.figure(figsize=(w / (right - left), h / (top - bottom)))\r\n # plt.subplots_adjust(left=left, right=right, top=top, bottom=bottom)\r\n\r\n if xticks is not None:\r\n plt.xticks(np.arange(len(xticks)), np.round(xticks, 2), rotation=45)\r\n if yticks is not None:\r\n plt.yticks(np.arange(len(yticks)), np.round(yticks, 2))\r\n for i in range(z.shape[0]):\r\n for j in range(z.shape[1]):\r\n # plt.text(j, i, accs[i, j].round(2), ha=\"center\", va=\"center\", color=\"b\", fontsize=12,\r\n # fontname='Times New Roman')\r\n plt.text(j, i, z[i, j], ha=\"center\", va=\"center\")\r\n\r\n if xlabel is not None:\r\n plt.xlabel(xlabel)\r\n if ylabel is not None:\r\n plt.ylabel(ylabel)\r\n if title is not None:\r\n plt.title(title)\r\n plt.imshow(z, interpolation='nearest', aspect='auto')\r\n\r\n plt.colorbar()\r\n if fig_path is not None:\r\n DirectoryOperator.FoldOperator(directory=fig_path).make_fold()\r\n plt.savefig(fig_path, transparent=True)\r\n if show:\r\n plt.show()\r\n plt.close()\r" }, { "identifier": "TimeOperator", "path": "_Utils/TimeOperator.py", "snippet": "class TimeOperator:\r\n def __init__(self):\r\n self.time_buffer = None\r\n self.time_record = 0\r\n self.time_sum = 0\r\n self.time_count = 0\r\n\r\n def time(self, output=False, promt=''):\r\n if self.time_buffer is None:\r\n self.time_buffer = time()\r\n else:\r\n self.time_record = time() - self.time_buffer\r\n self.time_buffer = None\r\n self.time_sum += self.time_record\r\n self.time_count += 1\r\n if output:\r\n print('{}Time == {:7.05f}'.format(promt, self.time_record))\r\n\r\n def get_time_sum(self):\r\n return self.time_sum\r\n\r\n def show_time_sum(self):\r\n print('{:.02f}'.format(self.get_time_sum()))\r\n\r\n def get_fps(self):\r\n return self.time_count / self.time_sum\r\n\r\n def __get_speed(self, to_metric=None):\r\n speed = self.get_fps()\r\n metric = 'Second'\r\n if speed < 1 and to_metric != metric:\r\n speed *= 60\r\n metric = 'Minute'\r\n if speed < 1 and to_metric != metric:\r\n speed *= 60\r\n metric = 'Hour'\r\n if speed < 1 and to_metric != metric:\r\n speed *= 24\r\n metric = 'Day'\r\n return speed, metric\r\n\r\n def show_process(self, process_now, process_total, name='Epoch'):\r\n if self.time_sum <= 0:\r\n return\r\n speed = self.time_sum / self.time_count\r\n print('{:<5s} [{:3.0f}/{:3.0f}] [{:8.02f}/{:8.02f}]: {:5.02f}({:5.02f}) '.format(\r\n name, process_now, process_total,\r\n process_now * speed, process_total * speed,\r\n self.time_record, speed\r\n ))\r\n\r\n def show_speed(self):\r\n speed, metric = self.__get_speed()\r\n print('{:4.01f} Frames/{}'.format(speed, metric))\r" }, { "identifier": "DirectoryOperator", "path": "_Utils/DirectoryOperator.py", "snippet": "class DirectoryOperator:\r\n def __init__(self, directory: str):\r\n self.directory = directory\r\n\r\n def make_fold(self):\r\n if not TestMode:\r\n # print('mk dir {}'.format(os.path.dirname(self.directory)))\r\n os.makedirs(os.path.dirname(self.directory), exist_ok=True)\r\n\r\n def modification_time(self):\r\n if os.path.exists(self.directory):\r\n return os.path.getmtime(self.directory)\r\n else:\r\n warnings.warn('Time_now is returned since the modification time for non-exist file is not available. 
File: {}'.format(self.directory))\r\n return time.time()\r" }, { "identifier": "get_clusters", "path": "DataSetMaster/dataset.py", "snippet": "def get_clusters(args):\n item_path = os.path.join(path_operator.get_checkpoint_path(level=1), 'Items0321')\n file_mnist_test = os.path.join(item_path, 'mnist_test_clusters89.67.txt')\n file_mnist_train = os.path.join(item_path, 'MnistTrain94.31B256.txt')\n file_amazon = os.path.join(item_path, 'amazon72.81B032ReValue.txt')\n file_webcam = os.path.join(item_path, 'webcamOurLoaderRevalveBatchWiseB032_84.03.txt')\n file_usps = os.path.join(item_path, 'usps_train_clusters85.10.txt')\n root_har = os.path.join(item_path, 'HAR')\n root_mtfl = os.path.join(item_path, 'MTFL')\n\n if args.dataset == 'MNISTUSPS': # 87.75 93.31\n if args.MnistTrain:\n file_mnist = file_mnist_train\n else:\n file_mnist = file_mnist_test\n file_list = [\n file_mnist,\n file_usps,\n ]\n elif args.dataset == 'ReverseMNIST': # 89.67 94.31\n if args.MnistTrain:\n file_mnist = file_mnist_train\n else:\n file_mnist = file_mnist_test\n file_list = [\n file_mnist,\n file_mnist,\n ]\n elif args.dataset == 'Office': # 75.28\n file_list = [\n file_amazon,\n file_webcam,\n ]\n elif args.dataset == 'MTFL':\n file_list = np.sort([os.path.join(root_mtfl, f) for f in os.listdir(root_mtfl) if f.endswith('txt')])\n elif args.dataset == 'HAR': # 81.70\n file_list = np.sort([os.path.join(root_har, f) for f in os.listdir(root_har) if f.endswith('txt')])\n else:\n raise NotImplementedError(\"\")\n\n def debug(x):\n print(x.shape)\n return x\n\n clusters = torch.cat(\n [debug(torch.from_numpy(np.loadtxt(c).astype(np.float32)).long()) for c in file_list],\n dim=0,\n ).cuda()\n return clusters" }, { "identifier": "svm_classify", "path": "classification.py", "snippet": "def svm_classify(data, data_gt, label, test_prop, C):\n \"\"\"\n trains a linear SVM on the data\n input C specifies the penalty factor of SVM\n \"\"\"\n seed = random.randint(0, 1000)\n train_idx, test_idx = TT_split(data.shape[1], test_prop, seed)\n train_data = np.concatenate([data[0][train_idx], data[1][train_idx]], axis=1)\n test_data = np.concatenate([data_gt[0][test_idx], data_gt[1][test_idx]], axis=1)\n test_label = label[test_idx]\n train_label = label[train_idx]\n\n # print('training SVM...')\n clf = svm.LinearSVC(C=C, dual=False)\n clf.fit(train_data, train_label.ravel())\n\n p = clf.predict(test_data)\n test_acc = accuracy_score(test_label, p)\n\n return test_acc" }, { "identifier": "UMAP", "path": "evaluate.py", "snippet": "def UMAP(feature_vec, type_vec, group_vec, pred_vec, n_type, n_batch, args, epoch, dst_root='../Visualization'):\n t = time.time()\n # print(\"Performing UMAP Visualization...\")\n # print('feature_vec.shape == {}'.format(feature_vec.shape))\n sc.set_figure_params(figsize=(4, 4), dpi=300)\n\n # type_vec = pd.DataFrame(type_vec)\n # for key in cell_type_dict.keys():\n # type_vec.replace(key, cell_type_dict[key], inplace=True)\n # group_vec = pd.DataFrame(group_vec)\n # for key in batch_dict.keys():\n # batch_vec.replace(key, batch_dict[key], inplace=True)\n\n adata = sc.AnnData(feature_vec)\n # print('adata.shape == {}'.format(adata.shape))\n sc.pp.neighbors(adata)\n adata.obs['cluster'] = pd.DataFrame(pred_vec).values.astype(np.str_)\n adata.obs['type'] = pd.DataFrame(type_vec).values.astype(np.str_)\n adata.obs['group'] = pd.DataFrame(group_vec).values.astype(np.str_)\n\n sc.tl.umap(adata)\n sc.pl.umap(adata,\n color=['cluster'],\n palette=sns.color_palette(\"husl\", n_type),\n 
save='E{:03d}UmapCluster{}.png'.format(epoch, str(args.dataset)),\n show=False)\n sc.pl.umap(adata,\n color=['type'],\n palette=sns.color_palette(\"husl\", n_type),\n save='E{:03d}UmapType{}.png'.format(epoch, str(args.dataset)),\n show=False)\n sc.pl.umap(adata,\n color=['group'],\n palette=sns.color_palette(\"hls\", n_batch),\n save='E{:03d}UmapGroup{}.png'.format(epoch, str(args.dataset)),\n show=False)\n roott = './figures/'\n for root, dirs, files in os.walk(roott):\n # print(root)\n # print(dirs)\n # print(files)\n for f in files:\n # print(os.path.join('../Visualization', f))\n FileOperator(\n os.path.join(root, f)\n ).rename(\n os.path.join(dst_root, f.replace('umapE', 'E')),\n auto_rename=False\n )\n if PrintTimer:\n print('VisualizeScatter finished with in {:.03f} seconds (x.shape == {}).'.format(\n time.time() - t,\n feature_vec.shape,\n ))" }, { "identifier": "evaluate2", "path": "evaluate.py", "snippet": "def evaluate2(feature_vec, pred_vec, type_vec, group_vec):\n nmi, ari, acc, pred_adjusted = cluster_metrics(type_vec, pred_vec)\n gs = np.unique(group_vec)\n ts = np.unique(type_vec)\n class_num = len(ts)\n group_num = len(gs)\n if group_vec is not None and group_num > 1:\n balance, entro = my_balance(pred_vec, group_vec, cluster_num=np.unique(type_vec).shape[0],\n group_num=np.unique(group_vec).shape[0])\n O = torch.zeros((class_num, group_num)).cuda()\n\n for b in gs:\n ind_g = b == group_vec\n pred_vec_g = pred_vec[ind_g]\n for t in ts:\n O[t, b] = np.sum(pred_vec_g == t)\n O += 1e-6\n O = (O / torch.sum(O))\n NmiFair = normalized_mutual_information(O).cpu().numpy()\n Fmeasure = FMeasure(beta=1)(acc, NmiFair)\n else:\n balance, entro = 0, 0\n NmiFair = 0\n Fmeasure = 0\n entro_v = np.mean(entro)\n global BestAcc, BestAri, BestNmi, BestBalance, BestEntropy, BestFairness, BestNmiFair, BestFmeasure\n if BestAcc < acc:\n BestAcc = acc\n if BestAri < ari:\n BestAri = ari\n if BestNmi < nmi:\n BestNmi = nmi\n if BestBalance < balance:\n BestBalance = balance\n # if BestFairness < fairness:\n # BestFairness = fairness\n if BestNmiFair < NmiFair:\n BestNmiFair = NmiFair\n if BestFmeasure < Fmeasure:\n BestFmeasure = Fmeasure\n if BestEntropy < entro_v:\n BestEntropy = entro_v\n\n print(\n 'NMI={:5.02f}|{:5.02f}, ARI={:5.02f}|{:5.02f}, ACC={:5.02f}|{:5.02f}, Balance={:5.02f}|{:5.02f}, NmiFair={:5.02f}|{:5.02f}, Fmeasure={:5.02f}|{:5.02f}, Entropy={:5.02f}|{:5.02f}[{}],'.format(\n nmi * 100, BestNmi * 100,\n ari * 100, BestAri * 100,\n acc * 100, BestAcc * 100,\n balance * 100, BestBalance * 100,\n # fairness * 100, BestFairness * 100,\n NmiFair * 100, BestNmiFair * 100,\n Fmeasure * 100, BestFmeasure * 100,\n entro_v, BestEntropy, entro\n )\n )\n met = {\n 'nmi' : nmi,\n 'ari' : ari,\n 'acc' : acc,\n 'balance' : balance,\n 'NmiFair' : NmiFair,\n 'Fmeasure': Fmeasure,\n }\n return pred_adjusted, met\n # tqdm.write('NMI=%.4f, ACC=%.4f, ARI=%.4f' % (nmi, acc, ari), end='')\n # if fair_metric:\n # kl, ari_b = fair_metrics(feature_vec, group_vec, pred_vec, type_vec)\n # print(', KL=%.4f, ARI_b=%.4f' % (kl, ari_b), end='')\n # tqdm.write('')" }, { "identifier": "visual_image_scatter", "path": "figures/ScatterMaster.py", "snippet": "def visual_image_scatter():\r\n np_path = os.path.join(\r\n 'D:/VirtualMachine/Codes/230904/SMAIL_RunSet_Visual/ --QuickConfig C100 --VisualFreq 5 --VisualRandom 1 --dataset NoisyMNIST30000 --seed 1999 --train_epoch 100/Checkpoints/Epoch099.npz')\r\n # np_path_row = os.path.join(root, np_paths[np_names.index(np_tag)], 'NpPoints', np_epoch)\r\n\r\n data = 
np.load(np_path, allow_pickle=False)\r\n data_vec = data['data_vec']\r\n feature_vec = data['feature_vec']\r\n group_vec = data['group_vec']\r\n type_vec = data['type_vec']\r\n\r\n # visualize_image(x=[\r\n # [it.reshape([28, 28]) for it in data_vec[:10]],\r\n # [it.reshape([28, 28]) for it in data_vec[10:20]],\r\n # [it.reshape([28, 28]) for it in data_vec[20:30]],\r\n # ], show=True)\r\n\r\n DrawMax = 3000\r\n if len(feature_vec) > DrawMax:\r\n it = np.arange(len(feature_vec))\r\n np.random.shuffle(it)\r\n ind = it[:DrawMax]\r\n feature_vec = feature_vec[ind]\r\n type_vec = type_vec[ind]\r\n group_vec = group_vec[ind]\r\n data_vec = data_vec[ind]\r\n vis_fea = TSNE(perplexity=50).fit_transform(feature_vec)\r\n\r\n _, ax = plt.subplots(figsize=(5 * 1 * 2, 5 * 1 * 2 / 1.6))\r\n\r\n label_color = np.unique(type_vec)\r\n color_num = len(np.unique(type_vec))\r\n # if color_num <= 2:\r\n # cmap = None\r\n if color_num <= 10:\r\n cmap = 'tab10'\r\n elif color_num <= 20:\r\n cmap = 'tab20'\r\n else:\r\n cmap = 'gist_ncar'\r\n for digit in np.unique(type_vec):\r\n ax.scatter(\r\n *vis_fea[type_vec == digit].T,\r\n # marker=f\"${digit}$\",\r\n s=0.5,\r\n # color=plt.cm.Dark2(digit),\r\n alpha=0.7,\r\n c=type_vec[type_vec == digit],\r\n cmap=cmap,\r\n vmax=max(4, np.max(label_color)),\r\n vmin=min(0, np.min(label_color)),\r\n zorder=2,\r\n )\r\n w = int(np.sqrt(len(data_vec[0])))\r\n h = w\r\n shown_images = np.array([[1.0, 1.0]]) # just something big\r\n for i in range(data_vec.shape[0]):\r\n # plot every digit on the embedding\r\n # show an annotation box for a group of digits\r\n dist = np.sum((vis_fea[i] - shown_images) ** 2, 1)\r\n if np.min(dist) < 2e1:\r\n # don't show points that are too close\r\n continue\r\n if np.min(dist) < 2e1:\r\n # don't show points that are too close\r\n continue\r\n shown_images = np.concatenate([shown_images, [vis_fea[i]]], axis=0)\r\n # img = offsetbox.OffsetImage(data_vec[i].reshape([w, h]), cmap=plt.cm.gray_r, )\r\n img = offsetbox.OffsetImage(data_vec[i].reshape([w, h]), cmap=plt.cm.gray_r, zoom=0.5)\r\n # img.ti\r\n imagebox = offsetbox.AnnotationBbox(\r\n img, # [w, h, 3]\r\n vis_fea[i],\r\n pad=0,\r\n frameon=False\r\n )\r\n imagebox.set(zorder=1)\r\n ax.add_artist(imagebox)\r\n\r\n ax.set_title('title')\r\n ax.axis(\"off\")\r\n plt.tight_layout()\r\n plt.savefig('D:/Pengxin/Temp/tmp.pdf')\r\n plt.show()\r\n\r\n print()\r\n pass\r" } ]
import math import os import time import warnings import numpy as np import torch import torchvision import torch.nn.functional as F import evaluate import faiss import scipy.io as sio from torch import nn from torch.autograd import Variable from DistComput import get_dist_release from _Utils.Calculator import get_nearest_k from _Utils.Logs import update_log from _Utils.Scatter import visualize2 from _Utils.Visualize import visualize, visual_matrix_console, visualize_image, plot_heat_map from _Utils import TimeOperator, DirectoryOperator from DataSetMaster.dataset import get_clusters from classification import svm_classify from evaluate import UMAP, evaluate2 from sklearn import metrics from munkres import Munkres from figures.ScatterMaster import visual_image_scatter
10,952
# xs_hat=fea1_rec[mask[:, 1] == 1], xs=fea1[mask[:, 1] == 1]).cpu().numpy() # rnmse_vec[1].extend(n1_v0) # rnmse_vec[1].extend(n1_v1) g = torch.concat((torch.zeros(len(fea0), device=fea0.device, dtype=torch.int), torch.ones(len(fea1), device=fea0.device, dtype=torch.int))) h = torch.cat([h0, h1]).detach().cpu().numpy() feature_vec.extend(h) data_vec.extend(torch.cat([fea0, fea1]).detach().cpu().numpy()) group_vec.extend(g.cpu().numpy()) type_vec.extend(torch.concat((class_labels0, class_labels1)).numpy()) inf_data_t = time.time() feature_vec = np.array(feature_vec) data_vec = np.array(data_vec) feature_vec_cluster = np.array(feature_vec_cluster) is_pair_all = np.array(is_pair_all) feature_vec_classification = np.array(feature_vec_classification) group_vec = np.array(group_vec) group_vec_cluster = np.array(group_vec_cluster) type_vec = np.array(type_vec) type_vec_cluster = np.array(type_vec_cluster) rnmse_vec[0] = np.array(rnmse_vec[0]) rnmse_vec[1] = np.array(rnmse_vec[1]) kmeans_time = TimeOperator.Timer() if args.ShowReconstruct: if args.dataset == 'MNISTUSPS': dims = [np.product(d.data.shape[1:]) for d in test_dataloader.dataset.datasets] data_list = [np.asarray(it.data, dtype=np.float32) for it in test_dataloader.dataset.datasets] Y = test_dataloader.dataset.datasets[0].targets else: dims = [d.shape[1] for d in test_dataloader.dataset.data] data_list = [np.asarray(it, dtype=np.float32) for it in test_dataloader.dataset.data] Y = test_dataloader.dataset.class_labels0 mask = test_dataloader.dataset.mask n_per_cat = 10 rec0, rec1 = self.decode([ torch.from_numpy(feature_vec[group_vec == 0]).cuda(), torch.from_numpy(feature_vec[group_vec == 1]).cuda()]) rec0 = rec0.detach().cpu().numpy() rec1 = rec1.detach().cpu().numpy() show_img = np.asarray([]) inds_map = np.asarray([]) for v in range(2): col = np.asarray([]) inds_map_col = np.asarray([]) for y in range(10): inds = np.arange(len(Y))[ np.logical_and(np.logical_and(mask[:, v] == 1, mask[:, 1 - v] == 0), Y == y) ] np.random.shuffle(inds) assert len(inds) >= n_per_cat inds = inds[:n_per_cat] raw_imgs = data_list[v][inds] missing_imgs = data_list[1 - v][inds] rec_imgs = [rec0, rec1][v][inds] rec_imgs_miss = [rec0, rec1][1 - v][inds] pack = np.asarray( [raw_imgs, rec_imgs, missing_imgs, rec_imgs_miss]).reshape([-1, n_per_cat, 28, 28]) if len(col): col = np.concatenate([col, pack], axis=0) else: col = pack if len(inds_map_col): inds_map_col = np.concatenate([inds_map_col, inds.reshape([1, -1])], axis=0) else: inds_map_col = inds.reshape([1, -1]) if len(show_img): show_img = np.concatenate([show_img, col], axis=1) else: show_img = col if len(inds_map): inds_map = np.concatenate([inds_map, inds_map_col], axis=1) else: inds_map = inds_map_col plot_heat_map(inds_map, show=True, fig_path='/xlearning/pengxin/Temp/MissingRecIM.svg') visualize_image(show_img, show=True, fig_path='/xlearning/pengxin/Temp/MissingRec.svg') selected_ind = [ [8, 2, 8, 9, 7, 2, 5, 9, 9, 9], [0, 2, 2, 3, 5, 7, 7, 9, 7, 0], ] # ToMouxin inds_to_mouxin = [ [im[si] for im, si in zip(inds_map[:, :n_per_cat], selected_ind[0])], [im[si] for im, si in zip(inds_map[:, n_per_cat:], selected_ind[1])], ] re_dt = np.load( '/xlearning/pengxin/Checkpoints/MultiClustering/RunSets/230105/IMvC_RunSet0114_Ablation_FakeSampleWise/ --QuickConfig X50C50 --dataset MNISTUSPS --loss_sim_contras 0.02 --seed 1998/SampleCache/Np.npz') np.savez('/xlearning/pengxin/Temp/MNISTUSPS_show.npz', feature_vec=np.asarray([ re_dt['d0_data'][inds_to_mouxin[0]], re_dt['d1_data'][inds_to_mouxin[1]] ])) 
selected_ind_global = np.concatenate( (np.asarray(selected_ind[0]).reshape([-1, 1]), np.asarray(selected_ind[1]).reshape([-1, 1]) + n_per_cat), axis=1 ) show_img_final = np.concatenate( [show_img[4 * i:4 * i + 4, selected_ind_global[i]] for i in range(len(selected_ind_global))], axis=1 )[:, [i * 2 for i in range(10)] + [i * 2 + 1 for i in range(10)]] visualize_image(show_img_final, show=True, fig_path='/xlearning/pengxin/Temp/MissingRecFinal.svg') return def cluster_and_measure(features, types, groups, row_pred=False): kst = time.time() centroids = torch.from_numpy(kmeans(features, self.class_num)) if args.ElActivationType in ['Normalize', 'BnNormalize', 'BnReNormalize']: centroids = F.normalize(centroids, dim=1) pred_vec = np.argmax(self.soft_ass(torch.from_numpy(features), centroids).numpy(), axis=1)
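A minimal, CPU-only sketch of the clustering step the cropped code ends on: cluster_and_measure runs faiss k-means over the pooled embeddings and labels each sample by the argmax of its soft assignment against the centroids (inner product for L2-normalized embeddings, negative Euclidean distance otherwise). The snippet itself uses StandardGpuResources with GpuIndexFlatL2; the IndexFlatL2 below is an assumption made only to keep the example self-contained, not the script's exact GPU path.

import numpy as np
import faiss

def kmeans_centroids(features: np.ndarray, class_num: int) -> np.ndarray:
    # Same faiss.Clustering setup as the snippet's kmeans() helper, but on a CPU index.
    d = features.shape[1]
    clus = faiss.Clustering(d, class_num)
    clus.verbose = False
    clus.niter = 300
    clus.nredo = 10
    index = faiss.IndexFlatL2(d)
    clus.train(np.ascontiguousarray(features, dtype=np.float32), index)
    return faiss.vector_to_array(clus.centroids).reshape(class_num, d)

def assign_clusters(features: np.ndarray, centroids: np.ndarray, normalized: bool = True) -> np.ndarray:
    # Mirrors Net.soft_ass: similarity is h @ centroids.T when embeddings are normalized,
    # otherwise -distance / 2; the hard cluster label is the argmax over centroids.
    if normalized:
        scores = features @ centroids.T
    else:
        scores = -np.linalg.norm(features[:, None, :] - centroids[None, :, :], axis=-1) / 2
    return scores.argmax(axis=1)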
def show_distribution_ct(type_vec, group_vec, pred_vec, class_num, group_num): v = np.zeros((class_num, class_num, group_num), dtype=int) for t, c, g in zip(type_vec, pred_vec, group_vec): v[t, c, g] += 1 visual_matrix_console(x=v) def kmeans(feature_vec, class_num): d = feature_vec.shape[1] kmeans = faiss.Clustering(d, class_num) kmeans.verbose = False kmeans.niter = 300 kmeans.nredo = 10 # kmeans.spherical = True # if LimitKmeans: # kmeans.max_points_per_centroid = 1000 # kmeans.min_points_per_centroid = 10 res = faiss.StandardGpuResources() cfg = faiss.GpuIndexFlatConfig() cfg.useFloat16 = True cfg.device = 0 index = faiss.GpuIndexFlatL2(res, d, cfg) # print(feature_vec.shape) kmeans.train(feature_vec, index) centroids = faiss.vector_to_array(kmeans.centroids).reshape(class_num, d) return centroids def show_distribution(cluster_vec, group_vec, class_num, group_num): for it in np.arange(group_num): print('{:4d}, '.format(it), end='') print('') cluster_group = torch.zeros((class_num, group_num), dtype=torch.int) for i, j in zip(cluster_vec, group_vec): cluster_group[i, j] += 1 # cluster_group = cluster_group[torch.argsort(torch.sum(cluster_group, dim=1))] for line in cluster_group: print('{:4d}: '.format(torch.sum(line)), end='') for it in line: print('{:4d}, '.format(it), end='') print('') def save_checkpoint(state, epoch): """ it has been trained for *epoch* epochs """ filename = 'Epoch{:03d}.checkpoint'.format(epoch) checkpoint_dir = os.path.join( os.path.dirname(os.getcwd()), 'Checkpoints', filename ) DirectoryOperator.FoldOperator(directory=checkpoint_dir).make_fold() if os.path.exists(checkpoint_dir): warnings.warn('Checkpoint exist and been replaced.({})'.format(checkpoint_dir)) print('Save check point into {}'.format(checkpoint_dir)) torch.save(state, checkpoint_dir) def get_ffn(dims, last_layers=None, with_bn=False, drop_out=0): layers = [] for ind in range(len(dims) - 1): in_dim = dims[ind] out_dim = dims[ind + 1] layers.append(nn.Linear(in_dim, out_dim)) if with_bn: layers.append(nn.BatchNorm1d(out_dim)) layers.append(nn.ReLU()) if drop_out: layers.append(nn.Dropout(drop_out)) if last_layers is not None: layers.extend(last_layers) return nn.Sequential(*layers) def get_cov(dims, strides, last_layers=None, with_bn=False, drop_out=0): layers = [] for ind in range(len(dims) - 1): in_dim = dims[ind] out_dim = dims[ind + 1] stride = strides[ind] # layers.append(nn.Linear(in_dim, out_dim)) if stride >= 0: layers.append(nn.Conv2d(in_dim, out_dim, kernel_size=3, stride=stride, padding=1)) else: layers.append(nn.ConvTranspose2d( in_dim, out_dim, kernel_size=3, stride=-stride, padding=1, output_padding=0 if stride == -1 else 1)) if with_bn: # layers.append(nn.BatchNorm1d(out_dim)) layers.append(nn.BatchNorm2d(out_dim)) layers.append(nn.ReLU()) if drop_out: layers.append(nn.Dropout(drop_out)) if last_layers is not None: layers.extend(last_layers) return nn.Sequential(*layers) class Net(nn.Module): def __init__(self, args, in_dims, class_num, group_num): super(Net, self).__init__() self.encoder_adaption = nn.ModuleList([ get_ffn([in_dims[i], 1024], with_bn=args.BatchNormType[0] == '1', drop_out=args.Dropout) for i in range(group_num if args.GroupWiseLayer[0] == '1' else 1)]) self.encoder = nn.ModuleList([ get_ffn([1024, 1024, 512], with_bn=args.BatchNormType[1] == '1', drop_out=args.Dropout) for _ in range(group_num if args.GroupWiseLayer[1] == '1' else 1)]) if args.representation_dim == 0: args.representation_dim = class_num self.class_num = class_num self.group_num = group_num 
self.pred_cac = None self.pred_center_cac = None if args.ElActivationType == 'None': el_activation_ = [] elif args.ElActivationType == 'Normalize': el_activation_ = [] elif args.ElActivationType == 'BnNormalize': el_activation_ = [nn.BatchNorm1d(args.representation_dim)] elif args.ElActivationType == 'BnReNormalize': el_activation_ = [nn.BatchNorm1d(args.representation_dim), nn.ReLU()] elif args.ElActivationType == 'BnRe': el_activation_ = [nn.BatchNorm1d(args.representation_dim), nn.ReLU()] else: raise NotImplementedError('') self.el_activation_ = el_activation_ self.encoder_linear = nn.ModuleList([ get_ffn([512, 256], with_bn=args.BatchNormType[2] == '1', drop_out=args.Dropout, last_layers=[nn.Linear(256, args.representation_dim)] + self.el_activation_) for _ in range(group_num if args.GroupWiseLayer[2] == '1' else 1)]) dec_in = args.representation_dim if args.McDecoder: dec_in *= group_num self.dec_in = dec_in self.decoder_linear = nn.ModuleList([ get_ffn([self.dec_in, 256, 512], with_bn=args.BatchNormType[3] == '1', drop_out=args.Dropout) for _ in range(group_num if args.GroupWiseLayer[3] == '1' else 1)]) if args.ActivationType == 'None': final_activation_ = [] elif args.ActivationType == 'Sigmoid': final_activation_ = [nn.Sigmoid()] elif args.ActivationType == 'Tanh': final_activation_ = [nn.Tanh()] else: raise NotImplementedError('') self.final_activation_ = final_activation_ self.decoder = nn.ModuleList([ get_ffn([512, 1024, 1024], with_bn=args.BatchNormType[4] == '1', drop_out=args.Dropout) for _ in range(group_num if args.GroupWiseLayer[4] == '1' else 1)]) self.decoder_adaption = nn.ModuleList([ get_ffn([], last_layers=[nn.Linear(1024, in_dims[i])] + self.final_activation_) for i in range(group_num if args.GroupWiseLayer[5] == '1' else 1)]) self.args = args self.in_dims = in_dims # def update_cluster_center(self, center): # self.cluster_centers = F.normalize(torch.from_numpy(center), dim=1).cuda() def forward(self, x, **kwargs): return self.decode(self.encode([x])) def encode(self, xs: list): hs = [] for g, x in enumerate(xs): if self.args.noise_type == 'None': pass elif self.args.noise_type == 'Drop': x = x * (Variable(x.data.new(x.size()).normal_(0, 0.1)) < self.args.noise_weight).type_as(x) elif self.args.noise_type == 'Add': x = x + Variable(x.data.new(x.size()).normal_(0, self.args.noise_weight)).type_as(x) else: raise NotImplementedError('') if len(x) != 0: if len(x) == 1: x = torch.concat([x, x]) # print(x.shape) # x = x.view((len(x), -1)) # print(x.shape) x = self.encoder_adaption[g if self.args.GroupWiseLayer[0] == '1' else 0](x) x = self.encoder[g if self.args.GroupWiseLayer[1] == '1' else 0](x) x = self.encoder_linear[g if self.args.GroupWiseLayer[2] == '1' else 0](x) if len(x) == 1: x = x[[0]] if self.args.ElActivationType in ['Normalize', 'BnNormalize', 'BnReNormalize']: x = F.normalize(x, dim=1) else: x = torch.zeros([0, self.args.representation_dim], device=torch.device('cuda:0')) hs.append(x) return hs def soft_ass(self, h, centroids): if self.args.ElActivationType in ['Normalize', 'BnNormalize', 'BnReNormalize']: return h @ centroids.T else: dst = torch.cdist(h, centroids) # return (torch.mean(dst) - dst) / (torch.amax(dst) - torch.amin(dst)) * 2 return -dst / 2 # def encode_class(self, hs): # cs = [] # for h in hs: # c = h @ self.cluster_centers.T # cs.append(c) # return cs def decode(self, hs): xs = [] for g, h in enumerate(hs): if self.args.McDecoder: h = torch.cat(hs, dim=1) if len(h) != 0: if len(h) == 1: h = torch.concat([h, h]) h = self.decoder_linear[g if 
self.args.GroupWiseLayer[3] == '1' else 0](h) h = self.decoder[g if self.args.GroupWiseLayer[4] == '1' else 0](h) h = self.decoder_adaption[g if self.args.GroupWiseLayer[5] == '1' else 0](h) if len(h) == 1: h = h[[0]] else: h = torch.zeros([0, self.in_dims[g]], device=torch.device('cuda:0')) xs.append(h) return xs def run(self, epochs, train_dataloader, test_dataloader, args): # if args.loss_self_cons: # clusters = get_clusters(args=args) optimizer_g = torch.optim.Adam( self.parameters(), lr=args.LearnRate, betas=(args.betas_a, args.betas_v), weight_decay=args.WeightDecay ) mse_loss = nn.MSELoss().cuda() timer_all = TimeOperator.Timer() timer_train = TimeOperator.Timer() timer_save = TimeOperator.Timer() ce_loss = nn.CrossEntropyLoss().cuda() type_detail_shown = False start_epoch = 0 if args.resume: if os.path.isfile(args.resume): print("=> loading checkpoint '{}'".format(args.resume)) checkpoint = torch.load(args.resume) # if args.gpu is None: # checkpoint = torch.load(args.resume) # else: # # Map model to be loaded to specified single gpu. # loc = 'cuda:{}'.format(args.gpu) # checkpoint = torch.load(args.resume, map_location=loc) start_epoch = checkpoint['epoch'] self.load_state_dict(checkpoint['state_dict']) optimizer_g.load_state_dict(checkpoint['optimizer']['optimizer_g']) # self.__dict__ = checkpoint['self_dic'] print("=> loaded checkpoint '{}' (epoch {})" .format(args.resume, checkpoint['epoch'])) # self.args = args # warnings.warn('This is not equal to start from the beginning due to different rands states.') # else: raise NotImplementedError("=> no checkpoint found at '{}'".format(args.resume)) if args.CodeTest: args.train_epoch = start_epoch + 1 epochs = start_epoch + 1 best_acc = 0 for epoch in range(start_epoch, epochs): if (epoch + 1) <= args.LearnRateWarm: lr = args.LearnRate * (epoch + 1) / args.LearnRateWarm else: if args.LearnRateDecayType == 'None': lr = args.LearnRate elif args.LearnRateDecayType == 'Exp': lr = args.LearnRate * ((1 + 10 * (epoch + 1 - args.LearnRateWarm) / ( args.train_epoch - args.LearnRateWarm)) ** -0.75) elif args.LearnRateDecayType == 'Cosine': lr = args.LearnRate * 0.5 * (1. 
+ math.cos( math.pi * (epoch + 1 - args.LearnRateWarm) / (args.train_epoch - args.LearnRateWarm))) else: raise NotImplementedError('args.LearnRateDecayType') if lr != args.LearnRate: def adjust_learning_rate(optimizer): print('adjust_learning_rate: {}'.format(lr)) for param_group in optimizer.param_groups: param_group['lr'] = lr adjust_learning_rate(optimizer_g) timer_all_time = time.time() # inf_t = time.time() # print('start epoch {}'.format(epoch)) self.eval() feature_vec, type_vec, group_vec = [], [], [] feature_vec_cluster = [] group_vec_cluster = [] feature_vec_classification = [] type_vec_cluster = [] data_vec = [] is_pair_all = [] timer_infer_data = TimeOperator.Timer() rnmse_vec = [[], []] # mask = 0 1 with torch.no_grad(): inf_data_t = time.time() for (fea0, fea1, class_labels0, class_labels1, mask, is_pair, index) in test_dataloader: timer_infer_data.update(time.time() - inf_data_t) # timer_infer_data.show(prefix='InferDataTime', total_count=len(test_dataloader), # print_end_time=False) fea0 = fea0.cuda() fea1 = fea1.cuda() if args.Rev: h1, h0 = self.encode([fea0, fea1]) if args.SingleView != -1: for v in range(len(mask[0])): if v != 1 - args.SingleView: mask[:, v] = 0 else: h0, h1 = self.encode([fea0, fea1]) if args.SingleView != -1: for v in range(len(mask[0])): if v != args.SingleView: mask[:, v] = 0 cluster_h0 = h0[mask[:, 0] == 1] cluster_h1 = h1[mask[:, 1] == 1] # if args.SingleView != -1: # mask[:, args.SingleView] = 0 # # if args.SingleView == 0: # # cluster_h1 = cluster_h1[[]] # # class_labels1 = class_labels1[[]] # # elif args.SingleView == 1: # # class_labels0 = class_labels0[[]] # # cluster_h0 = cluster_h0[[]] # # else: # # raise NotImplementedError('') is_pair_all.extend(is_pair) feature_vec_cluster.extend(torch.cat([cluster_h0, cluster_h1]).detach().cpu().numpy()) group_vec_cluster.extend(torch.concat((torch.zeros(len(cluster_h0), dtype=torch.int), torch.ones(len(cluster_h1), dtype=torch.int))).numpy()) type_vec_cluster.extend(torch.concat((class_labels0[mask[:, 0] == 1], class_labels1[mask[:, 1] == 1])).numpy()) feature_vec_classification.extend(torch.cat([h0, h1]).detach().cpu().numpy()) if (epoch + 1) == epochs or (epoch + 1) % args.VisualFreq == 0: if torch.sum(torch.logical_not(torch.logical_or(mask[:, 1], mask[:, 0]))): raise NotImplementedError('存在一个pair两个模态都缺失') if args.reFill == 'Copy': if torch.sum(mask[:, 0] == 0): h0[mask[:, 0] == 0] = h1[mask[:, 0] == 0] if torch.sum(mask[:, 1] == 0): h1[mask[:, 1] == 0] = h0[mask[:, 1] == 0] elif args.reFill == 'Center': # raise NotImplementedError('') if self.pred_center_cac is None: pass warnings.warn('self.pred_center_cac == None') else: centors = torch.zeros((len(mask), 2, len(self.pred_center_cac[0]))).cuda() centors[mask[:, 0] == 1, 0] = self.pred_center_cac[ self.pred_cac[:torch.sum(mask[:, 0] == 1)]] centors[mask[:, 1] == 1, 1] = self.pred_center_cac[ self.pred_cac[torch.sum(mask[:, 0] == 1):]] if torch.sum(mask[:, 0] == 0): h0[mask[:, 0] == 0] = centors[mask[:, 0] == 0, 1] if torch.sum(mask[:, 1] == 0): h1[mask[:, 1] == 0] = centors[mask[:, 1] == 0, 0] elif args.reFill == 'KnnMapMean': if torch.sum(mask[:, 0] == 0): nearest = get_nearest_k(h1[mask[:, 0] == 0], h1[is_pair], args.reAlignK) h0p = h0[is_pair] h1[mask[:, 0] == 0] = torch.cat([torch.mean(h0p[ns], dim=0) for ns in nearest]) if torch.sum(mask[:, 1] == 0): nearest = get_nearest_k(h0[mask[:, 1] == 0], h0[is_pair], args.reAlignK) h1p = h1[is_pair] h1[mask[:, 1] == 0] = torch.cat([torch.mean(h1p[ns], dim=0) for ns in nearest]) # raise 
NotImplementedError('') elif args.reFill == 'KnnMean': # 关联对齐, xi1 不变, xi2替换成离xi1最近的k个view2的点的mean if torch.sum(mask[:, 1] == 0): hs0 = h0[mask[:, 1] == 0] he1 = h1[mask[:, 1] == 1] nearest = get_nearest_k(hs0, he1, args.reAlignK) # nearest = torch.argsort(torch.cdist(hs0.cpu(), he1.cpu()), dim=1)[:, :args.reAlignK] h1[mask[:, 1] == 0] = torch.cat([torch.mean(he1[ns], dim=0) for ns in nearest]) # class_labels1[mask[:, 1] == 0] = class_labels1[mask[:, 1] == 1][nearest[:, 0]] if torch.sum(mask[:, 0] == 0): hs1 = h1[mask[:, 0] == 0] he0 = h0[mask[:, 0] == 1] nearest = get_nearest_k(hs1, he0, args.reAlignK) # nearest = torch.argsort(torch.cdist(hs1.cpu(), he0.cpu()), dim=1)[:, :args.reAlignK] h0[mask[:, 0] == 0] = torch.cat([torch.mean(he0[ns], dim=0) for ns in nearest]) # class_labels0[mask[:, 0] == 0] = class_labels0[mask[:, 0] == 1][nearest[:, 0]] ############################################################### # 缺失补全, xi2 = mean(离xi1最近的k个view2的点) # fill_num = k # C = euclidean_dist(h0, h1) # row_idx = C.argsort() # col_idx = (C.t()).argsort() # # Mij denotes the flag of i-th sample in view 0 and j-th sample in view 1 # M = torch.logical_and((mask[:, 0].repeat(test_num, 1)).t(), mask[:, 1].repeat(test_num, 1)) # for i in range(test_num): # idx0 = col_idx[i, :][ # M[col_idx[i, :], i]] # idx for view 0 to sort and find the non-missing neighbors # idx1 = row_idx[i, :][ # M[i, row_idx[i, :]]] # idx for view 1 to sort and find the non-missing neighbors # if len(idx1) != 0 and len(idx0) == 0: # i-th sample in view 1 is missing # avg_fill = h1[idx1[0:fill_num], :].sum(dim=0) / fill_num # cnt += (class_labels1[idx1[0:fill_num]] == class_labels1[i]).sum() # missing_cnt += 1 # recover_out0[i, :] = h0[i, :] # recover_out1[i, :] = avg_fill # missing # elif len(idx0) != 0 and len(idx1) == 0: # avg_fill = h0[idx0[0:fill_num], :].sum(dim=0) / fill_num # cnt += (class_labels0[idx0[0:fill_num]] == class_labels0[i]).sum() # missing_cnt += 1 # recover_out0[i, :] = avg_fill # missing # recover_out1[i, :] = h1[i, :] # elif len(idx0) != 0 and len(idx1) != 0: # recover_out0[i, :] = h0[i, :] # recover_out1[i, :] = h1[i, :] # else: # raise Exception('error') # if setting == 1: # align_out0.extend((recover_out0.cpu()).numpy()) # align_out1.extend((recover_out1.cpu()).numpy()) # continue # else: raise NotImplementedError('') to_realign = torch.logical_and(is_pair == 0, torch.logical_and(mask[:, 1], mask[:, 0])) if args.reAlign == 'KnnMean': # 关联对齐, xi1 不变, xi2替换成离xi1最近的k个view2的点的mean if torch.sum(to_realign): ha1 = h1[to_realign] nearest = get_nearest_k(h0[to_realign], ha1, args.reAlignK) # dist = torch.cdist(h0[to_realign].cpu(), ha1.cpu()) # nearest = torch.argsort(dist, dim=1)[:, :args.reAlignK] h1[to_realign] = torch.cat([torch.mean(ha1[ns], dim=0) for ns in nearest]) # class_labels1[is_pair == 0] = class_labels1[is_pair == 0][nearest[:, 0]] elif args.reAlign == 'Copy': if torch.sum(to_realign): h1[to_realign] = h0[to_realign] # class_labels1[is_pair == 0] = class_labels0[is_pair == 0] elif args.reAlign == 'KnnMapMean': if torch.sum(to_realign): targ_v1 = h1[is_pair] nearest = get_nearest_k(h0[to_realign], h0[is_pair], args.reAlignK) h1[to_realign] = torch.cat([torch.mean(targ_v1[ns], dim=0) for ns in nearest]) # class_labels1[is_pair == 0] = ... 
elif args.reAlign == 'Ignore': pass else: raise NotImplementedError('') if args.Rev: fea0_rec, fea1_rec = self.decode([h1, h0]) else: fea0_rec, fea1_rec = self.decode([h0, h1]) # if len(fea0_rec[0]) == len(fea1_rec[0]): # fea_rec = torch.concat([fea0_rec, fea1_rec]) # fea = torch.concat([fea0, fea1]) # mask_c = torch.concat([mask[:, 0], mask[:, 1]]) # if torch.sum(mask_c == 0): # rnmse_vec[0].extend( # evaluate.get_rnmse(xs_hat=fea_rec[mask_c == 0], xs=fea[mask_c == 0]).cpu().numpy()) # if torch.sum(mask_c == 1): # rnmse_vec[1].extend( # evaluate.get_rnmse(xs_hat=fea_rec[mask_c == 1], xs=fea[mask_c == 1]).cpu().numpy()) # else: # if torch.sum(mask == 0): # n0_v0 = evaluate.get_rnmse( # xs_hat=fea0_rec[mask[:, 0] == 0], xs=fea0[mask[:, 0] == 0]).cpu().numpy() # n0_v1 = evaluate.get_rnmse( # xs_hat=fea1_rec[mask[:, 1] == 0], xs=fea1[mask[:, 1] == 0]).cpu().numpy() # rnmse_vec[0].extend(n0_v0) # rnmse_vec[0].extend(n0_v1) # if torch.sum(mask == 1): # n1_v0 = evaluate.get_rnmse( # xs_hat=fea0_rec[mask[:, 0] == 1], xs=fea0[mask[:, 0] == 1]).cpu().numpy() # n1_v1 = evaluate.get_rnmse( # xs_hat=fea1_rec[mask[:, 1] == 1], xs=fea1[mask[:, 1] == 1]).cpu().numpy() # rnmse_vec[1].extend(n1_v0) # rnmse_vec[1].extend(n1_v1) g = torch.concat((torch.zeros(len(fea0), device=fea0.device, dtype=torch.int), torch.ones(len(fea1), device=fea0.device, dtype=torch.int))) h = torch.cat([h0, h1]).detach().cpu().numpy() feature_vec.extend(h) data_vec.extend(torch.cat([fea0, fea1]).detach().cpu().numpy()) group_vec.extend(g.cpu().numpy()) type_vec.extend(torch.concat((class_labels0, class_labels1)).numpy()) inf_data_t = time.time() feature_vec = np.array(feature_vec) data_vec = np.array(data_vec) feature_vec_cluster = np.array(feature_vec_cluster) is_pair_all = np.array(is_pair_all) feature_vec_classification = np.array(feature_vec_classification) group_vec = np.array(group_vec) group_vec_cluster = np.array(group_vec_cluster) type_vec = np.array(type_vec) type_vec_cluster = np.array(type_vec_cluster) rnmse_vec[0] = np.array(rnmse_vec[0]) rnmse_vec[1] = np.array(rnmse_vec[1]) kmeans_time = TimeOperator.Timer() if args.ShowReconstruct: if args.dataset == 'MNISTUSPS': dims = [np.product(d.data.shape[1:]) for d in test_dataloader.dataset.datasets] data_list = [np.asarray(it.data, dtype=np.float32) for it in test_dataloader.dataset.datasets] Y = test_dataloader.dataset.datasets[0].targets else: dims = [d.shape[1] for d in test_dataloader.dataset.data] data_list = [np.asarray(it, dtype=np.float32) for it in test_dataloader.dataset.data] Y = test_dataloader.dataset.class_labels0 mask = test_dataloader.dataset.mask n_per_cat = 10 rec0, rec1 = self.decode([ torch.from_numpy(feature_vec[group_vec == 0]).cuda(), torch.from_numpy(feature_vec[group_vec == 1]).cuda()]) rec0 = rec0.detach().cpu().numpy() rec1 = rec1.detach().cpu().numpy() show_img = np.asarray([]) inds_map = np.asarray([]) for v in range(2): col = np.asarray([]) inds_map_col = np.asarray([]) for y in range(10): inds = np.arange(len(Y))[ np.logical_and(np.logical_and(mask[:, v] == 1, mask[:, 1 - v] == 0), Y == y) ] np.random.shuffle(inds) assert len(inds) >= n_per_cat inds = inds[:n_per_cat] raw_imgs = data_list[v][inds] missing_imgs = data_list[1 - v][inds] rec_imgs = [rec0, rec1][v][inds] rec_imgs_miss = [rec0, rec1][1 - v][inds] pack = np.asarray( [raw_imgs, rec_imgs, missing_imgs, rec_imgs_miss]).reshape([-1, n_per_cat, 28, 28]) if len(col): col = np.concatenate([col, pack], axis=0) else: col = pack if len(inds_map_col): inds_map_col = 
np.concatenate([inds_map_col, inds.reshape([1, -1])], axis=0) else: inds_map_col = inds.reshape([1, -1]) if len(show_img): show_img = np.concatenate([show_img, col], axis=1) else: show_img = col if len(inds_map): inds_map = np.concatenate([inds_map, inds_map_col], axis=1) else: inds_map = inds_map_col plot_heat_map(inds_map, show=True, fig_path='/xlearning/pengxin/Temp/MissingRecIM.svg') visualize_image(show_img, show=True, fig_path='/xlearning/pengxin/Temp/MissingRec.svg') selected_ind = [ [8, 2, 8, 9, 7, 2, 5, 9, 9, 9], [0, 2, 2, 3, 5, 7, 7, 9, 7, 0], ] # ToMouxin inds_to_mouxin = [ [im[si] for im, si in zip(inds_map[:, :n_per_cat], selected_ind[0])], [im[si] for im, si in zip(inds_map[:, n_per_cat:], selected_ind[1])], ] re_dt = np.load( '/xlearning/pengxin/Checkpoints/MultiClustering/RunSets/230105/IMvC_RunSet0114_Ablation_FakeSampleWise/ --QuickConfig X50C50 --dataset MNISTUSPS --loss_sim_contras 0.02 --seed 1998/SampleCache/Np.npz') np.savez('/xlearning/pengxin/Temp/MNISTUSPS_show.npz', feature_vec=np.asarray([ re_dt['d0_data'][inds_to_mouxin[0]], re_dt['d1_data'][inds_to_mouxin[1]] ])) selected_ind_global = np.concatenate( (np.asarray(selected_ind[0]).reshape([-1, 1]), np.asarray(selected_ind[1]).reshape([-1, 1]) + n_per_cat), axis=1 ) show_img_final = np.concatenate( [show_img[4 * i:4 * i + 4, selected_ind_global[i]] for i in range(len(selected_ind_global))], axis=1 )[:, [i * 2 for i in range(10)] + [i * 2 + 1 for i in range(10)]] visualize_image(show_img_final, show=True, fig_path='/xlearning/pengxin/Temp/MissingRecFinal.svg') return def cluster_and_measure(features, types, groups, row_pred=False): kst = time.time() centroids = torch.from_numpy(kmeans(features, self.class_num)) if args.ElActivationType in ['Normalize', 'BnNormalize', 'BnReNormalize']: centroids = F.normalize(centroids, dim=1) pred_vec = np.argmax(self.soft_ass(torch.from_numpy(features), centroids).numpy(), axis=1)
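The reFill/reAlign 'KnnMean' branches above (their Chinese comments translate roughly to: keep the observed view's embedding x_i unchanged and replace the missing or misaligned view with the mean of the k nearest observed embeddings from the other view) depend on the imported get_nearest_k, whose body is not part of this context. The sketch below is one plausible reading of that pattern; the helper's name, signature, and the torch.stack write-back shape are assumptions, not the repository's actual implementation.

import torch

def get_nearest_k_assumed(queries: torch.Tensor, pool: torch.Tensor, k: int) -> torch.Tensor:
    # Assumed behaviour of get_nearest_k: indices of the k nearest rows of `pool`
    # for every row of `queries`, under Euclidean distance.
    return torch.cdist(queries, pool).topk(k, dim=1, largest=False).indices

def knn_mean_refill(anchor_missing: torch.Tensor, observed_other_view: torch.Tensor, k: int = 3) -> torch.Tensor:
    # 'KnnMean': a sample whose second view is missing keeps its first-view embedding
    # (the anchor) and receives, as its second-view embedding, the mean of the k observed
    # second-view embeddings nearest to that anchor.
    nearest = get_nearest_k_assumed(anchor_missing, observed_other_view, k)
    return torch.stack([observed_other_view[idx].mean(dim=0) for idx in nearest])

In the snippet the result is written back with h1[mask[:, 1] == 0] = ..., which presupposes that both encoders map into a comparable (here, normalized) latent space.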
pred_adjusted, met = evaluate2(features, pred_vec, types, groups)
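The next_line hands the cluster assignments to evaluate2, which reports NMI, ARI and ACC through cluster_metrics (not included in this context) along with the balance/fairness measures shown above. For reference, the conventional meaning of that ACC figure is Hungarian-matched clustering accuracy; a generic stand-in is sketched below. The script imports Munkres for this kind of matching, while the sketch uses scipy's linear_sum_assignment as an equivalent, so treat it as an illustration rather than the repository's exact cluster_metrics.

import numpy as np
from scipy.optimize import linear_sum_assignment

def clustering_accuracy(y_true: np.ndarray, y_pred: np.ndarray) -> float:
    # Build the cluster-vs-class contingency table, find the label permutation that
    # maximizes the matched count (Hungarian algorithm), and return the matched fraction.
    n = int(max(y_true.max(), y_pred.max())) + 1
    contingency = np.zeros((n, n), dtype=np.int64)
    for t, p in zip(y_true, y_pred):
        contingency[p, t] += 1
    rows, cols = linear_sum_assignment(contingency.max() - contingency)
    return contingency[rows, cols].sum() / y_true.size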
13
2023-12-21 08:50:36+00:00
16k
Azure-Samples/functions-python-web-crawler
.venv/Lib/site-packages/urllib3/connection.py
[ { "identifier": "HTTPHeaderDict", "path": ".venv/Lib/site-packages/urllib3/_collections.py", "snippet": "class HTTPHeaderDict(typing.MutableMapping[str, str]):\n \"\"\"\n :param headers:\n An iterable of field-value pairs. Must not contain multiple field names\n when compared case-insensitively.\n\n :param kwargs:\n Additional field-value pairs to pass in to ``dict.update``.\n\n A ``dict`` like container for storing HTTP Headers.\n\n Field names are stored and compared case-insensitively in compliance with\n RFC 7230. Iteration provides the first case-sensitive key seen for each\n case-insensitive pair.\n\n Using ``__setitem__`` syntax overwrites fields that compare equal\n case-insensitively in order to maintain ``dict``'s api. For fields that\n compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``\n in a loop.\n\n If multiple fields that are equal case-insensitively are passed to the\n constructor or ``.update``, the behavior is undefined and some will be\n lost.\n\n >>> headers = HTTPHeaderDict()\n >>> headers.add('Set-Cookie', 'foo=bar')\n >>> headers.add('set-cookie', 'baz=quxx')\n >>> headers['content-length'] = '7'\n >>> headers['SET-cookie']\n 'foo=bar, baz=quxx'\n >>> headers['Content-Length']\n '7'\n \"\"\"\n\n _container: typing.MutableMapping[str, list[str]]\n\n def __init__(self, headers: ValidHTTPHeaderSource | None = None, **kwargs: str):\n super().__init__()\n self._container = {} # 'dict' is insert-ordered\n if headers is not None:\n if isinstance(headers, HTTPHeaderDict):\n self._copy_from(headers)\n else:\n self.extend(headers)\n if kwargs:\n self.extend(kwargs)\n\n def __setitem__(self, key: str, val: str) -> None:\n # avoid a bytes/str comparison by decoding before httplib\n if isinstance(key, bytes):\n key = key.decode(\"latin-1\")\n self._container[key.lower()] = [key, val]\n\n def __getitem__(self, key: str) -> str:\n val = self._container[key.lower()]\n return \", \".join(val[1:])\n\n def __delitem__(self, key: str) -> None:\n del self._container[key.lower()]\n\n def __contains__(self, key: object) -> bool:\n if isinstance(key, str):\n return key.lower() in self._container\n return False\n\n def setdefault(self, key: str, default: str = \"\") -> str:\n return super().setdefault(key, default)\n\n def __eq__(self, other: object) -> bool:\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return False\n else:\n other_as_http_header_dict = type(self)(maybe_constructable)\n\n return {k.lower(): v for k, v in self.itermerged()} == {\n k.lower(): v for k, v in other_as_http_header_dict.itermerged()\n }\n\n def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)\n\n def __len__(self) -> int:\n return len(self._container)\n\n def __iter__(self) -> typing.Iterator[str]:\n # Only provide the originally cased names\n for vals in self._container.values():\n yield vals[0]\n\n def discard(self, key: str) -> None:\n try:\n del self[key]\n except KeyError:\n pass\n\n def add(self, key: str, val: str, *, combine: bool = False) -> None:\n \"\"\"Adds a (name, value) pair, doesn't overwrite the value if it already\n exists.\n\n If this is called with combine=True, instead of adding a new header value\n as a distinct item during iteration, this will instead append the value to\n any existing header value with a comma. 
If no existing header value exists\n for the key, then the value will simply be added, ignoring the combine parameter.\n\n >>> headers = HTTPHeaderDict(foo='bar')\n >>> headers.add('Foo', 'baz')\n >>> headers['foo']\n 'bar, baz'\n >>> list(headers.items())\n [('foo', 'bar'), ('foo', 'baz')]\n >>> headers.add('foo', 'quz', combine=True)\n >>> list(headers.items())\n [('foo', 'bar, baz, quz')]\n \"\"\"\n # avoid a bytes/str comparison by decoding before httplib\n if isinstance(key, bytes):\n key = key.decode(\"latin-1\")\n key_lower = key.lower()\n new_vals = [key, val]\n # Keep the common case aka no item present as fast as possible\n vals = self._container.setdefault(key_lower, new_vals)\n if new_vals is not vals:\n # if there are values here, then there is at least the initial\n # key/value pair\n assert len(vals) >= 2\n if combine:\n vals[-1] = vals[-1] + \", \" + val\n else:\n vals.append(val)\n\n def extend(self, *args: ValidHTTPHeaderSource, **kwargs: str) -> None:\n \"\"\"Generic import function for any type of header-like object.\n Adapted version of MutableMapping.update in order to insert items\n with self.add instead of self.__setitem__\n \"\"\"\n if len(args) > 1:\n raise TypeError(\n f\"extend() takes at most 1 positional arguments ({len(args)} given)\"\n )\n other = args[0] if len(args) >= 1 else ()\n\n if isinstance(other, HTTPHeaderDict):\n for key, val in other.iteritems():\n self.add(key, val)\n elif isinstance(other, typing.Mapping):\n for key, val in other.items():\n self.add(key, val)\n elif isinstance(other, typing.Iterable):\n other = typing.cast(typing.Iterable[typing.Tuple[str, str]], other)\n for key, value in other:\n self.add(key, value)\n elif hasattr(other, \"keys\") and hasattr(other, \"__getitem__\"):\n # THIS IS NOT A TYPESAFE BRANCH\n # In this branch, the object has a `keys` attr but is not a Mapping or any of\n # the other types indicated in the method signature. We do some stuff with\n # it as though it partially implements the Mapping interface, but we're not\n # doing that stuff safely AT ALL.\n for key in other.keys():\n self.add(key, other[key])\n\n for key, value in kwargs.items():\n self.add(key, value)\n\n @typing.overload\n def getlist(self, key: str) -> list[str]:\n ...\n\n @typing.overload\n def getlist(self, key: str, default: _DT) -> list[str] | _DT:\n ...\n\n def getlist(\n self, key: str, default: _Sentinel | _DT = _Sentinel.not_passed\n ) -> list[str] | _DT:\n \"\"\"Returns a list of all the values for the named field. 
Returns an\n empty list if the key doesn't exist.\"\"\"\n try:\n vals = self._container[key.lower()]\n except KeyError:\n if default is _Sentinel.not_passed:\n # _DT is unbound; empty list is instance of List[str]\n return []\n # _DT is bound; default is instance of _DT\n return default\n else:\n # _DT may or may not be bound; vals[1:] is instance of List[str], which\n # meets our external interface requirement of `Union[List[str], _DT]`.\n return vals[1:]\n\n def _prepare_for_method_change(self) -> Self:\n \"\"\"\n Remove content-specific header fields before changing the request\n method to GET or HEAD according to RFC 9110, Section 15.4.\n \"\"\"\n content_specific_headers = [\n \"Content-Encoding\",\n \"Content-Language\",\n \"Content-Location\",\n \"Content-Type\",\n \"Content-Length\",\n \"Digest\",\n \"Last-Modified\",\n ]\n for header in content_specific_headers:\n self.discard(header)\n return self\n\n # Backwards compatibility for httplib\n getheaders = getlist\n getallmatchingheaders = getlist\n iget = getlist\n\n # Backwards compatibility for http.cookiejar\n get_all = getlist\n\n def __repr__(self) -> str:\n return f\"{type(self).__name__}({dict(self.itermerged())})\"\n\n def _copy_from(self, other: HTTPHeaderDict) -> None:\n for key in other:\n val = other.getlist(key)\n self._container[key.lower()] = [key, *val]\n\n def copy(self) -> HTTPHeaderDict:\n clone = type(self)()\n clone._copy_from(self)\n return clone\n\n def iteritems(self) -> typing.Iterator[tuple[str, str]]:\n \"\"\"Iterate over all header lines, including duplicate ones.\"\"\"\n for key in self:\n vals = self._container[key.lower()]\n for val in vals[1:]:\n yield vals[0], val\n\n def itermerged(self) -> typing.Iterator[tuple[str, str]]:\n \"\"\"Iterate over all headers, merging duplicate ones together.\"\"\"\n for key in self:\n val = self._container[key.lower()]\n yield val[0], \", \".join(val[1:])\n\n def items(self) -> HTTPHeaderDictItemView: # type: ignore[override]\n return HTTPHeaderDictItemView(self)\n\n def _has_value_for_header(self, header_name: str, potential_value: str) -> bool:\n if header_name in self:\n return potential_value in self._container[header_name.lower()][1:]\n return False\n\n def __ior__(self, other: object) -> HTTPHeaderDict:\n # Supports extending a header dict in-place using operator |=\n # combining items with add instead of __setitem__\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return NotImplemented\n self.extend(maybe_constructable)\n return self\n\n def __or__(self, other: object) -> HTTPHeaderDict:\n # Supports merging header dicts using operator |\n # combining items with add instead of __setitem__\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return NotImplemented\n result = self.copy()\n result.extend(maybe_constructable)\n return result\n\n def __ror__(self, other: object) -> HTTPHeaderDict:\n # Supports merging header dicts using operator | when other is on left side\n # combining items with add instead of __setitem__\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return NotImplemented\n result = type(self)(maybe_constructable)\n result.extend(self)\n return result" }, { "identifier": "assert_header_parsing", "path": ".venv/Lib/site-packages/urllib3/util/response.py", "snippet": "def assert_header_parsing(headers: httplib.HTTPMessage) -> None:\n \"\"\"\n Asserts whether all headers have been 
successfully parsed.\n Extracts encountered errors from the result of parsing headers.\n\n Only works on Python 3.\n\n :param http.client.HTTPMessage headers: Headers to verify.\n\n :raises urllib3.exceptions.HeaderParsingError:\n If parsing errors are found.\n \"\"\"\n\n # This will fail silently if we pass in the wrong kind of parameter.\n # To make debugging easier add an explicit check.\n if not isinstance(headers, httplib.HTTPMessage):\n raise TypeError(f\"expected httplib.Message, got {type(headers)}.\")\n\n unparsed_data = None\n\n # get_payload is actually email.message.Message.get_payload;\n # we're only interested in the result if it's not a multipart message\n if not headers.is_multipart():\n payload = headers.get_payload()\n\n if isinstance(payload, (bytes, str)):\n unparsed_data = payload\n\n # httplib is assuming a response body is available\n # when parsing headers even when httplib only sends\n # header data to parse_headers() This results in\n # defects on multipart responses in particular.\n # See: https://github.com/urllib3/urllib3/issues/800\n\n # So we ignore the following defects:\n # - StartBoundaryNotFoundDefect:\n # The claimed start boundary was never found.\n # - MultipartInvariantViolationDefect:\n # A message claimed to be a multipart but no subparts were found.\n defects = [\n defect\n for defect in headers.defects\n if not isinstance(\n defect, (StartBoundaryNotFoundDefect, MultipartInvariantViolationDefect)\n )\n ]\n\n if defects or unparsed_data:\n raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)" }, { "identifier": "_DEFAULT_TIMEOUT", "path": ".venv/Lib/site-packages/urllib3/util/timeout.py", "snippet": "_DEFAULT_TIMEOUT: Final[_TYPE_DEFAULT] = _TYPE_DEFAULT.token" }, { "identifier": "_TYPE_TIMEOUT", "path": ".venv/Lib/site-packages/urllib3/util/timeout.py", "snippet": "_TYPE_TIMEOUT = typing.Optional[typing.Union[float, _TYPE_DEFAULT]]" }, { "identifier": "Timeout", "path": ".venv/Lib/site-packages/urllib3/util/timeout.py", "snippet": "class Timeout:\n \"\"\"Timeout configuration.\n\n Timeouts can be defined as a default for a pool:\n\n .. code-block:: python\n\n import urllib3\n\n timeout = urllib3.util.Timeout(connect=2.0, read=7.0)\n\n http = urllib3.PoolManager(timeout=timeout)\n\n resp = http.request(\"GET\", \"https://example.com/\")\n\n print(resp.status)\n\n Or per-request (which overrides the default for the pool):\n\n .. code-block:: python\n\n response = http.request(\"GET\", \"https://example.com/\", timeout=Timeout(10))\n\n Timeouts can be disabled by setting all the parameters to ``None``:\n\n .. code-block:: python\n\n no_timeout = Timeout(connect=None, read=None)\n response = http.request(\"GET\", \"https://example.com/\", timeout=no_timeout)\n\n\n :param total:\n This combines the connect and read timeouts into one; the read timeout\n will be set to the time leftover from the connect attempt. In the\n event that both a connect timeout and a total are specified, or a read\n timeout and a total are specified, the shorter timeout will be applied.\n\n Defaults to None.\n\n :type total: int, float, or None\n\n :param connect:\n The maximum amount of time (in seconds) to wait for a connection\n attempt to a server to succeed. 
Omitting the parameter will default the\n connect timeout to the system default, probably `the global default\n timeout in socket.py\n <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.\n None will set an infinite timeout for connection attempts.\n\n :type connect: int, float, or None\n\n :param read:\n The maximum amount of time (in seconds) to wait between consecutive\n read operations for a response from the server. Omitting the parameter\n will default the read timeout to the system default, probably `the\n global default timeout in socket.py\n <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.\n None will set an infinite timeout.\n\n :type read: int, float, or None\n\n .. note::\n\n Many factors can affect the total amount of time for urllib3 to return\n an HTTP response.\n\n For example, Python's DNS resolver does not obey the timeout specified\n on the socket. Other factors that can affect total request time include\n high CPU load, high swap, the program running at a low priority level,\n or other behaviors.\n\n In addition, the read and total timeouts only measure the time between\n read operations on the socket connecting the client and the server,\n not the total amount of time for the request to return a complete\n response. For most requests, the timeout is raised because the server\n has not sent the first byte in the specified time. This is not always\n the case; if a server streams one byte every fifteen seconds, a timeout\n of 20 seconds will not trigger, even though the request will take\n several minutes to complete.\n\n If your goal is to cut off any request after a set amount of wall clock\n time, consider having a second \"watcher\" thread to cut off a slow\n request.\n \"\"\"\n\n #: A sentinel object representing the default timeout value\n DEFAULT_TIMEOUT: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT\n\n def __init__(\n self,\n total: _TYPE_TIMEOUT = None,\n connect: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n read: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n ) -> None:\n self._connect = self._validate_timeout(connect, \"connect\")\n self._read = self._validate_timeout(read, \"read\")\n self.total = self._validate_timeout(total, \"total\")\n self._start_connect: float | None = None\n\n def __repr__(self) -> str:\n return f\"{type(self).__name__}(connect={self._connect!r}, read={self._read!r}, total={self.total!r})\"\n\n # __str__ provided for backwards compatibility\n __str__ = __repr__\n\n @staticmethod\n def resolve_default_timeout(timeout: _TYPE_TIMEOUT) -> float | None:\n return getdefaulttimeout() if timeout is _DEFAULT_TIMEOUT else timeout\n\n @classmethod\n def _validate_timeout(cls, value: _TYPE_TIMEOUT, name: str) -> _TYPE_TIMEOUT:\n \"\"\"Check that a timeout attribute is valid.\n\n :param value: The timeout value to validate\n :param name: The name of the timeout attribute to validate. This is\n used to specify in error messages.\n :return: The validated and casted version of the given value.\n :raises ValueError: If it is a numeric value less than or equal to\n zero, or the type is not an integer, float, or None.\n \"\"\"\n if value is None or value is _DEFAULT_TIMEOUT:\n return value\n\n if isinstance(value, bool):\n raise ValueError(\n \"Timeout cannot be a boolean value. 
It must \"\n \"be an int, float or None.\"\n )\n try:\n float(value)\n except (TypeError, ValueError):\n raise ValueError(\n \"Timeout value %s was %s, but it must be an \"\n \"int, float or None.\" % (name, value)\n ) from None\n\n try:\n if value <= 0:\n raise ValueError(\n \"Attempted to set %s timeout to %s, but the \"\n \"timeout cannot be set to a value less \"\n \"than or equal to 0.\" % (name, value)\n )\n except TypeError:\n raise ValueError(\n \"Timeout value %s was %s, but it must be an \"\n \"int, float or None.\" % (name, value)\n ) from None\n\n return value\n\n @classmethod\n def from_float(cls, timeout: _TYPE_TIMEOUT) -> Timeout:\n \"\"\"Create a new Timeout from a legacy timeout value.\n\n The timeout value used by httplib.py sets the same timeout on the\n connect(), and recv() socket requests. This creates a :class:`Timeout`\n object that sets the individual timeouts to the ``timeout`` value\n passed to this function.\n\n :param timeout: The legacy timeout value.\n :type timeout: integer, float, :attr:`urllib3.util.Timeout.DEFAULT_TIMEOUT`, or None\n :return: Timeout object\n :rtype: :class:`Timeout`\n \"\"\"\n return Timeout(read=timeout, connect=timeout)\n\n def clone(self) -> Timeout:\n \"\"\"Create a copy of the timeout object\n\n Timeout properties are stored per-pool but each request needs a fresh\n Timeout object to ensure each one has its own start/stop configured.\n\n :return: a copy of the timeout object\n :rtype: :class:`Timeout`\n \"\"\"\n # We can't use copy.deepcopy because that will also create a new object\n # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to\n # detect the user default.\n return Timeout(connect=self._connect, read=self._read, total=self.total)\n\n def start_connect(self) -> float:\n \"\"\"Start the timeout clock, used during a connect() attempt\n\n :raises urllib3.exceptions.TimeoutStateError: if you attempt\n to start a timer that has been started already.\n \"\"\"\n if self._start_connect is not None:\n raise TimeoutStateError(\"Timeout timer has already been started.\")\n self._start_connect = time.monotonic()\n return self._start_connect\n\n def get_connect_duration(self) -> float:\n \"\"\"Gets the time elapsed since the call to :meth:`start_connect`.\n\n :return: Elapsed time in seconds.\n :rtype: float\n :raises urllib3.exceptions.TimeoutStateError: if you attempt\n to get duration for a timer that hasn't been started.\n \"\"\"\n if self._start_connect is None:\n raise TimeoutStateError(\n \"Can't get connect duration for timer that has not started.\"\n )\n return time.monotonic() - self._start_connect\n\n @property\n def connect_timeout(self) -> _TYPE_TIMEOUT:\n \"\"\"Get the value to use when setting a connection timeout.\n\n This will be a positive float or integer, the value None\n (never timeout), or the default system timeout.\n\n :return: Connect timeout.\n :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None\n \"\"\"\n if self.total is None:\n return self._connect\n\n if self._connect is None or self._connect is _DEFAULT_TIMEOUT:\n return self.total\n\n return min(self._connect, self.total) # type: ignore[type-var]\n\n @property\n def read_timeout(self) -> float | None:\n \"\"\"Get the value for the read timeout.\n\n This assumes some time has elapsed in the connection timeout and\n computes the read timeout appropriately.\n\n If self.total is set, the read timeout is dependent on the amount of\n time taken by the connect timeout. 
If the connection time has not been\n established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be\n raised.\n\n :return: Value to use for the read timeout.\n :rtype: int, float or None\n :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`\n has not yet been called on this object.\n \"\"\"\n if (\n self.total is not None\n and self.total is not _DEFAULT_TIMEOUT\n and self._read is not None\n and self._read is not _DEFAULT_TIMEOUT\n ):\n # In case the connect timeout has not yet been established.\n if self._start_connect is None:\n return self._read\n return max(0, min(self.total - self.get_connect_duration(), self._read))\n elif self.total is not None and self.total is not _DEFAULT_TIMEOUT:\n return max(0, self.total - self.get_connect_duration())\n else:\n return self.resolve_default_timeout(self._read)" }, { "identifier": "to_str", "path": ".venv/Lib/site-packages/urllib3/util/util.py", "snippet": "def to_str(\n x: str | bytes, encoding: str | None = None, errors: str | None = None\n) -> str:\n if isinstance(x, str):\n return x\n elif not isinstance(x, bytes):\n raise TypeError(f\"not expecting type {type(x).__name__}\")\n if encoding or errors:\n return x.decode(encoding or \"utf-8\", errors=errors or \"strict\")\n return x.decode()" }, { "identifier": "wait_for_read", "path": ".venv/Lib/site-packages/urllib3/util/wait.py", "snippet": "def wait_for_read(sock: socket.socket, timeout: float | None = None) -> bool:\n \"\"\"Waits for reading to be available on a given socket.\n Returns True if the socket is readable, or False if the timeout expired.\n \"\"\"\n return wait_for_socket(sock, read=True, timeout=timeout)" }, { "identifier": "_TYPE_BODY", "path": ".venv/Lib/site-packages/urllib3/_base_connection.py", "snippet": "_TYPE_BODY = typing.Union[bytes, typing.IO[typing.Any], typing.Iterable[bytes], str]" }, { "identifier": "ProxyConfig", "path": ".venv/Lib/site-packages/urllib3/_base_connection.py", "snippet": "class ProxyConfig(typing.NamedTuple):\n ssl_context: ssl.SSLContext | None\n use_forwarding_for_https: bool\n assert_hostname: None | str | Literal[False]\n assert_fingerprint: str | None" }, { "identifier": "_ResponseOptions", "path": ".venv/Lib/site-packages/urllib3/_base_connection.py", "snippet": "class _ResponseOptions(typing.NamedTuple):\n # TODO: Remove this in favor of a better\n # HTTP request/response lifecycle tracking.\n request_method: str\n request_url: str\n preload_content: bool\n decode_content: bool\n enforce_content_length: bool" }, { "identifier": "__version__", "path": ".venv/Lib/site-packages/urllib3/_version.py", "snippet": "" }, { "identifier": "ConnectTimeoutError", "path": ".venv/Lib/site-packages/urllib3/exceptions.py", "snippet": "class ConnectTimeoutError(TimeoutError):\n \"\"\"Raised when a socket timeout occurs while connecting to a server\"\"\"" }, { "identifier": "HeaderParsingError", "path": ".venv/Lib/site-packages/urllib3/exceptions.py", "snippet": "class HeaderParsingError(HTTPError):\n \"\"\"Raised by assert_header_parsing, but we convert it to a log.warning statement.\"\"\"\n\n def __init__(\n self, defects: list[MessageDefect], unparsed_data: bytes | str | None\n ) -> None:\n message = f\"{defects or 'Unknown'}, unparsed data: {unparsed_data!r}\"\n super().__init__(message)" }, { "identifier": "NameResolutionError", "path": ".venv/Lib/site-packages/urllib3/exceptions.py", "snippet": "class NameResolutionError(NewConnectionError):\n \"\"\"Raised when host name resolution fails.\"\"\"\n\n def __init__(self, host: str, 
conn: HTTPConnection, reason: socket.gaierror):\n message = f\"Failed to resolve '{host}' ({reason})\"\n super().__init__(conn, message)" }, { "identifier": "NewConnectionError", "path": ".venv/Lib/site-packages/urllib3/exceptions.py", "snippet": "class NewConnectionError(ConnectTimeoutError, HTTPError):\n \"\"\"Raised when we fail to establish a new connection. Usually ECONNREFUSED.\"\"\"\n\n def __init__(self, conn: HTTPConnection, message: str) -> None:\n self.conn = conn\n super().__init__(f\"{conn}: {message}\")\n\n @property\n def pool(self) -> HTTPConnection:\n warnings.warn(\n \"The 'pool' property is deprecated and will be removed \"\n \"in urllib3 v2.1.0. Use 'conn' instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n\n return self.conn" }, { "identifier": "ProxyError", "path": ".venv/Lib/site-packages/urllib3/exceptions.py", "snippet": "class ProxyError(HTTPError):\n \"\"\"Raised when the connection to a proxy fails.\"\"\"\n\n # The original error is also available as __cause__.\n original_error: Exception\n\n def __init__(self, message: str, error: Exception) -> None:\n super().__init__(message, error)\n self.original_error = error" }, { "identifier": "SystemTimeWarning", "path": ".venv/Lib/site-packages/urllib3/exceptions.py", "snippet": "class SystemTimeWarning(SecurityWarning):\n \"\"\"Warned when system time is suspected to be wrong\"\"\"" }, { "identifier": "connection", "path": ".venv/Lib/site-packages/urllib3/util/connection.py", "snippet": "_TYPE_SOCKET_OPTIONS = typing.Sequence[typing.Tuple[int, int, typing.Union[int, bytes]]]\nHAS_IPV6 = _has_ipv6(\"::1\")\ndef is_connection_dropped(conn: BaseHTTPConnection) -> bool: # Platform-specific\ndef create_connection(\n address: tuple[str, int],\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n source_address: tuple[str, int] | None = None,\n socket_options: _TYPE_SOCKET_OPTIONS | None = None,\n) -> socket.socket:\ndef _set_socket_options(\n sock: socket.socket, options: _TYPE_SOCKET_OPTIONS | None\n) -> None:\ndef allowed_gai_family() -> socket.AddressFamily:\ndef _has_ipv6(host: str) -> bool:" }, { "identifier": "ssl_", "path": ".venv/Lib/site-packages/urllib3/util/ssl_.py", "snippet": "HAS_NEVER_CHECK_COMMON_NAME = False\nIS_PYOPENSSL = False\nALPN_PROTOCOLS = [\"http/1.1\"]\n_TYPE_VERSION_INFO = typing.Tuple[int, int, int, str, int]\nHASHFUNC_MAP = {32: md5, 40: sha1, 64: sha256}\n_SSL_VERSION_TO_TLS_VERSION: dict[int, int] = {}\n HAS_NEVER_CHECK_COMMON_NAME = False\n OP_NO_COMPRESSION = 0x20000 # type: ignore[assignment]\n OP_NO_TICKET = 0x4000 # type: ignore[assignment]\n PROTOCOL_TLS_CLIENT = 16 # type: ignore[assignment]\n_TYPE_PEER_CERT_RET = typing.Union[\"_TYPE_PEER_CERT_RET_DICT\", bytes, None]\ndef _is_bpo_43522_fixed(\n implementation_name: str,\n version_info: _TYPE_VERSION_INFO,\n pypy_version_info: _TYPE_VERSION_INFO | None,\n) -> bool:\ndef _is_has_never_check_common_name_reliable(\n openssl_version: str,\n openssl_version_number: int,\n implementation_name: str,\n version_info: _TYPE_VERSION_INFO,\n pypy_version_info: _TYPE_VERSION_INFO | None,\n) -> bool:\ndef assert_fingerprint(cert: bytes | None, fingerprint: str) -> None:\ndef resolve_cert_reqs(candidate: None | int | str) -> VerifyMode:\ndef resolve_ssl_version(candidate: None | int | str) -> int:\ndef create_urllib3_context(\n ssl_version: int | None = None,\n cert_reqs: int | None = None,\n options: int | None = None,\n ciphers: str | None = None,\n ssl_minimum_version: int | None = None,\n ssl_maximum_version: int | None = None,\n) -> 
ssl.SSLContext:\ndef ssl_wrap_socket(\n sock: socket.socket,\n keyfile: str | None = ...,\n certfile: str | None = ...,\n cert_reqs: int | None = ...,\n ca_certs: str | None = ...,\n server_hostname: str | None = ...,\n ssl_version: int | None = ...,\n ciphers: str | None = ...,\n ssl_context: ssl.SSLContext | None = ...,\n ca_cert_dir: str | None = ...,\n key_password: str | None = ...,\n ca_cert_data: None | str | bytes = ...,\n tls_in_tls: Literal[False] = ...,\n) -> ssl.SSLSocket:\ndef ssl_wrap_socket(\n sock: socket.socket,\n keyfile: str | None = ...,\n certfile: str | None = ...,\n cert_reqs: int | None = ...,\n ca_certs: str | None = ...,\n server_hostname: str | None = ...,\n ssl_version: int | None = ...,\n ciphers: str | None = ...,\n ssl_context: ssl.SSLContext | None = ...,\n ca_cert_dir: str | None = ...,\n key_password: str | None = ...,\n ca_cert_data: None | str | bytes = ...,\n tls_in_tls: bool = ...,\n) -> ssl.SSLSocket | SSLTransportType:\ndef ssl_wrap_socket(\n sock: socket.socket,\n keyfile: str | None = None,\n certfile: str | None = None,\n cert_reqs: int | None = None,\n ca_certs: str | None = None,\n server_hostname: str | None = None,\n ssl_version: int | None = None,\n ciphers: str | None = None,\n ssl_context: ssl.SSLContext | None = None,\n ca_cert_dir: str | None = None,\n key_password: str | None = None,\n ca_cert_data: None | str | bytes = None,\n tls_in_tls: bool = False,\n) -> ssl.SSLSocket | SSLTransportType:\ndef is_ipaddress(hostname: str | bytes) -> bool:\ndef _is_key_file_encrypted(key_file: str) -> bool:\ndef _ssl_wrap_socket_impl(\n sock: socket.socket,\n ssl_context: ssl.SSLContext,\n tls_in_tls: bool,\n server_hostname: str | None = None,\n) -> ssl.SSLSocket | SSLTransportType:\n class _TYPE_PEER_CERT_RET_DICT(TypedDict, total=False):" }, { "identifier": "SKIP_HEADER", "path": ".venv/Lib/site-packages/urllib3/util/request.py", "snippet": "SKIP_HEADER = \"@@@SKIP_HEADER@@@\"" }, { "identifier": "SKIPPABLE_HEADERS", "path": ".venv/Lib/site-packages/urllib3/util/request.py", "snippet": "SKIPPABLE_HEADERS = frozenset([\"accept-encoding\", \"host\", \"user-agent\"])" }, { "identifier": "body_to_chunks", "path": ".venv/Lib/site-packages/urllib3/util/request.py", "snippet": "def body_to_chunks(\n body: typing.Any | None, method: str, blocksize: int\n) -> ChunksAndContentLength:\n \"\"\"Takes the HTTP request method, body, and blocksize and\n transforms them into an iterable of chunks to pass to\n socket.sendall() and an optional 'Content-Length' header.\n\n A 'Content-Length' of 'None' indicates the length of the body\n can't be determined so should use 'Transfer-Encoding: chunked'\n for framing instead.\n \"\"\"\n\n chunks: typing.Iterable[bytes] | None\n content_length: int | None\n\n # No body, we need to make a recommendation on 'Content-Length'\n # based on whether that request method is expected to have\n # a body or not.\n if body is None:\n chunks = None\n if method.upper() not in _METHODS_NOT_EXPECTING_BODY:\n content_length = 0\n else:\n content_length = None\n\n # Bytes or strings become bytes\n elif isinstance(body, (str, bytes)):\n chunks = (to_bytes(body),)\n content_length = len(chunks[0])\n\n # File-like object, TODO: use seek() and tell() for length?\n elif hasattr(body, \"read\"):\n\n def chunk_readable() -> typing.Iterable[bytes]:\n nonlocal body, blocksize\n encode = isinstance(body, io.TextIOBase)\n while True:\n datablock = body.read(blocksize)\n if not datablock:\n break\n if encode:\n datablock = 
datablock.encode(\"iso-8859-1\")\n yield datablock\n\n chunks = chunk_readable()\n content_length = None\n\n # Otherwise we need to start checking via duck-typing.\n else:\n try:\n # Check if the body implements the buffer API.\n mv = memoryview(body)\n except TypeError:\n try:\n # Check if the body is an iterable\n chunks = iter(body)\n content_length = None\n except TypeError:\n raise TypeError(\n f\"'body' must be a bytes-like object, file-like \"\n f\"object, or iterable. Instead was {body!r}\"\n ) from None\n else:\n # Since it implements the buffer API can be passed directly to socket.sendall()\n chunks = (body,)\n content_length = mv.nbytes\n\n return ChunksAndContentLength(chunks=chunks, content_length=content_length)" }, { "identifier": "assert_fingerprint", "path": ".venv/Lib/site-packages/urllib3/util/ssl_.py", "snippet": "def assert_fingerprint(cert: bytes | None, fingerprint: str) -> None:\n \"\"\"\n Checks if given fingerprint matches the supplied certificate.\n\n :param cert:\n Certificate as bytes object.\n :param fingerprint:\n Fingerprint as string of hexdigits, can be interspersed by colons.\n \"\"\"\n\n if cert is None:\n raise SSLError(\"No certificate for the peer.\")\n\n fingerprint = fingerprint.replace(\":\", \"\").lower()\n digest_length = len(fingerprint)\n hashfunc = HASHFUNC_MAP.get(digest_length)\n if not hashfunc:\n raise SSLError(f\"Fingerprint of invalid length: {fingerprint}\")\n\n # We need encode() here for py32; works on py2 and p33.\n fingerprint_bytes = unhexlify(fingerprint.encode())\n\n cert_digest = hashfunc(cert).digest()\n\n if not hmac.compare_digest(cert_digest, fingerprint_bytes):\n raise SSLError(\n f'Fingerprints did not match. Expected \"{fingerprint}\", got \"{cert_digest.hex()}\"'\n )" }, { "identifier": "create_urllib3_context", "path": ".venv/Lib/site-packages/urllib3/util/ssl_.py", "snippet": "def create_urllib3_context(\n ssl_version: int | None = None,\n cert_reqs: int | None = None,\n options: int | None = None,\n ciphers: str | None = None,\n ssl_minimum_version: int | None = None,\n ssl_maximum_version: int | None = None,\n) -> ssl.SSLContext:\n \"\"\"Creates and configures an :class:`ssl.SSLContext` instance for use with urllib3.\n\n :param ssl_version:\n The desired protocol version to use. This will default to\n PROTOCOL_SSLv23 which will negotiate the highest protocol that both\n the server and your installation of OpenSSL support.\n\n This parameter is deprecated instead use 'ssl_minimum_version'.\n :param ssl_minimum_version:\n The minimum version of TLS to be used. Use the 'ssl.TLSVersion' enum for specifying the value.\n :param ssl_maximum_version:\n The maximum version of TLS to be used. Use the 'ssl.TLSVersion' enum for specifying the value.\n Not recommended to set to anything other than 'ssl.TLSVersion.MAXIMUM_SUPPORTED' which is the\n default value.\n :param cert_reqs:\n Whether to require the certificate verification. This defaults to\n ``ssl.CERT_REQUIRED``.\n :param options:\n Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,\n ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``, and ``ssl.OP_NO_TICKET``.\n :param ciphers:\n Which cipher suites to allow the server to select. 
Defaults to either system configured\n ciphers if OpenSSL 1.1.1+, otherwise uses a secure default set of ciphers.\n :returns:\n Constructed SSLContext object with specified options\n :rtype: SSLContext\n \"\"\"\n if SSLContext is None:\n raise TypeError(\"Can't create an SSLContext object without an ssl module\")\n\n # This means 'ssl_version' was specified as an exact value.\n if ssl_version not in (None, PROTOCOL_TLS, PROTOCOL_TLS_CLIENT):\n # Disallow setting 'ssl_version' and 'ssl_minimum|maximum_version'\n # to avoid conflicts.\n if ssl_minimum_version is not None or ssl_maximum_version is not None:\n raise ValueError(\n \"Can't specify both 'ssl_version' and either \"\n \"'ssl_minimum_version' or 'ssl_maximum_version'\"\n )\n\n # 'ssl_version' is deprecated and will be removed in the future.\n else:\n # Use 'ssl_minimum_version' and 'ssl_maximum_version' instead.\n ssl_minimum_version = _SSL_VERSION_TO_TLS_VERSION.get(\n ssl_version, TLSVersion.MINIMUM_SUPPORTED\n )\n ssl_maximum_version = _SSL_VERSION_TO_TLS_VERSION.get(\n ssl_version, TLSVersion.MAXIMUM_SUPPORTED\n )\n\n # This warning message is pushing users to use 'ssl_minimum_version'\n # instead of both min/max. Best practice is to only set the minimum version and\n # keep the maximum version to be it's default value: 'TLSVersion.MAXIMUM_SUPPORTED'\n warnings.warn(\n \"'ssl_version' option is deprecated and will be \"\n \"removed in urllib3 v2.1.0. Instead use 'ssl_minimum_version'\",\n category=DeprecationWarning,\n stacklevel=2,\n )\n\n # PROTOCOL_TLS is deprecated in Python 3.10 so we always use PROTOCOL_TLS_CLIENT\n context = SSLContext(PROTOCOL_TLS_CLIENT)\n\n if ssl_minimum_version is not None:\n context.minimum_version = ssl_minimum_version\n else: # Python <3.10 defaults to 'MINIMUM_SUPPORTED' so explicitly set TLSv1.2 here\n context.minimum_version = TLSVersion.TLSv1_2\n\n if ssl_maximum_version is not None:\n context.maximum_version = ssl_maximum_version\n\n # Unless we're given ciphers defer to either system ciphers in\n # the case of OpenSSL 1.1.1+ or use our own secure default ciphers.\n if ciphers:\n context.set_ciphers(ciphers)\n\n # Setting the default here, as we may have no ssl module on import\n cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs\n\n if options is None:\n options = 0\n # SSLv2 is easily broken and is considered harmful and dangerous\n options |= OP_NO_SSLv2\n # SSLv3 has several problems and is now dangerous\n options |= OP_NO_SSLv3\n # Disable compression to prevent CRIME attacks for OpenSSL 1.0+\n # (issue #309)\n options |= OP_NO_COMPRESSION\n # TLSv1.2 only. Unless set explicitly, do not request tickets.\n # This may save some bandwidth on wire, and although the ticket is encrypted,\n # there is a risk associated with it being on wire,\n # if the server is not rotating its ticketing keys properly.\n options |= OP_NO_TICKET\n\n context.options |= options\n\n # Enable post-handshake authentication for TLS 1.3, see GH #1634. PHA is\n # necessary for conditional client cert authentication with TLS 1.3.\n # The attribute is None for OpenSSL <= 1.1.0 or does not exist in older\n # versions of Python. 
We only enable if certificate verification is enabled to work\n # around Python issue #37428\n # See: https://bugs.python.org/issue37428\n if (\n cert_reqs == ssl.CERT_REQUIRED\n and getattr(context, \"post_handshake_auth\", None) is not None\n ):\n context.post_handshake_auth = True\n\n # The order of the below lines setting verify_mode and check_hostname\n # matter due to safe-guards SSLContext has to prevent an SSLContext with\n # check_hostname=True, verify_mode=NONE/OPTIONAL.\n # We always set 'check_hostname=False' for pyOpenSSL so we rely on our own\n # 'ssl.match_hostname()' implementation.\n if cert_reqs == ssl.CERT_REQUIRED and not IS_PYOPENSSL:\n context.verify_mode = cert_reqs\n context.check_hostname = True\n else:\n context.check_hostname = False\n context.verify_mode = cert_reqs\n\n try:\n context.hostname_checks_common_name = False\n except AttributeError: # Defensive: for CPython < 3.8.9 and 3.9.3; for PyPy < 7.3.8\n pass\n\n # Enable logging of TLS session keys via defacto standard environment variable\n # 'SSLKEYLOGFILE', if the feature is available (Python 3.8+). Skip empty values.\n if hasattr(context, \"keylog_filename\"):\n sslkeylogfile = os.environ.get(\"SSLKEYLOGFILE\")\n if sslkeylogfile:\n context.keylog_filename = sslkeylogfile\n\n return context" }, { "identifier": "is_ipaddress", "path": ".venv/Lib/site-packages/urllib3/util/ssl_.py", "snippet": "def is_ipaddress(hostname: str | bytes) -> bool:\n \"\"\"Detects whether the hostname given is an IPv4 or IPv6 address.\n Also detects IPv6 addresses with Zone IDs.\n\n :param str hostname: Hostname to examine.\n :return: True if the hostname is an IP address, False otherwise.\n \"\"\"\n if isinstance(hostname, bytes):\n # IDN A-label bytes are ASCII compatible.\n hostname = hostname.decode(\"ascii\")\n return bool(_IPV4_RE.match(hostname) or _BRACELESS_IPV6_ADDRZ_RE.match(hostname))" }, { "identifier": "resolve_cert_reqs", "path": ".venv/Lib/site-packages/urllib3/util/ssl_.py", "snippet": "def resolve_cert_reqs(candidate: None | int | str) -> VerifyMode:\n \"\"\"\n Resolves the argument to a numeric constant, which can be passed to\n the wrap_socket function/method from the ssl module.\n Defaults to :data:`ssl.CERT_REQUIRED`.\n If given a string it is assumed to be the name of the constant in the\n :mod:`ssl` module or its abbreviation.\n (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.\n If it's neither `None` nor a string we assume it is already the numeric\n constant which can directly be passed to wrap_socket.\n \"\"\"\n if candidate is None:\n return CERT_REQUIRED\n\n if isinstance(candidate, str):\n res = getattr(ssl, candidate, None)\n if res is None:\n res = getattr(ssl, \"CERT_\" + candidate)\n return res # type: ignore[no-any-return]\n\n return candidate # type: ignore[return-value]" }, { "identifier": "resolve_ssl_version", "path": ".venv/Lib/site-packages/urllib3/util/ssl_.py", "snippet": "def resolve_ssl_version(candidate: None | int | str) -> int:\n \"\"\"\n like resolve_cert_reqs\n \"\"\"\n if candidate is None:\n return PROTOCOL_TLS\n\n if isinstance(candidate, str):\n res = getattr(ssl, candidate, None)\n if res is None:\n res = getattr(ssl, \"PROTOCOL_\" + candidate)\n return typing.cast(int, res)\n\n return candidate" }, { "identifier": "ssl_wrap_socket", "path": ".venv/Lib/site-packages/urllib3/util/ssl_.py", "snippet": "@typing.overload\ndef ssl_wrap_socket(\n sock: socket.socket,\n keyfile: str | None = ...,\n certfile: str | None = ...,\n cert_reqs: int | None = ...,\n ca_certs: str 
| None = ...,\n server_hostname: str | None = ...,\n ssl_version: int | None = ...,\n ciphers: str | None = ...,\n ssl_context: ssl.SSLContext | None = ...,\n ca_cert_dir: str | None = ...,\n key_password: str | None = ...,\n ca_cert_data: None | str | bytes = ...,\n tls_in_tls: Literal[False] = ...,\n) -> ssl.SSLSocket:\n ..." }, { "identifier": "CertificateError", "path": ".venv/Lib/site-packages/urllib3/util/ssl_match_hostname.py", "snippet": "class CertificateError(ValueError):\n pass" }, { "identifier": "match_hostname", "path": ".venv/Lib/site-packages/urllib3/util/ssl_match_hostname.py", "snippet": "def match_hostname(\n cert: _TYPE_PEER_CERT_RET_DICT | None,\n hostname: str,\n hostname_checks_common_name: bool = False,\n) -> None:\n \"\"\"Verify that *cert* (in decoded format as returned by\n SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125\n rules are followed, but IP addresses are not accepted for *hostname*.\n\n CertificateError is raised on failure. On success, the function\n returns nothing.\n \"\"\"\n if not cert:\n raise ValueError(\n \"empty or no certificate, match_hostname needs a \"\n \"SSL socket or SSL context with either \"\n \"CERT_OPTIONAL or CERT_REQUIRED\"\n )\n try:\n # Divergence from upstream: ipaddress can't handle byte str\n #\n # The ipaddress module shipped with Python < 3.9 does not support\n # scoped IPv6 addresses so we unconditionally strip the Zone IDs for\n # now. Once we drop support for Python 3.9 we can remove this branch.\n if \"%\" in hostname:\n host_ip = ipaddress.ip_address(hostname[: hostname.rfind(\"%\")])\n else:\n host_ip = ipaddress.ip_address(hostname)\n\n except ValueError:\n # Not an IP address (common case)\n host_ip = None\n dnsnames = []\n san: tuple[tuple[str, str], ...] = cert.get(\"subjectAltName\", ())\n key: str\n value: str\n for key, value in san:\n if key == \"DNS\":\n if host_ip is None and _dnsname_match(value, hostname):\n return\n dnsnames.append(value)\n elif key == \"IP Address\":\n if host_ip is not None and _ipaddress_match(value, host_ip):\n return\n dnsnames.append(value)\n\n # We only check 'commonName' if it's enabled and we're not verifying\n # an IP address. IP addresses aren't valid within 'commonName'.\n if hostname_checks_common_name and host_ip is None and not dnsnames:\n for sub in cert.get(\"subject\", ()):\n for key, value in sub:\n if key == \"commonName\":\n if _dnsname_match(value, hostname):\n return\n dnsnames.append(value)\n\n if len(dnsnames) > 1:\n raise CertificateError(\n \"hostname %r \"\n \"doesn't match either of %s\" % (hostname, \", \".join(map(repr, dnsnames)))\n )\n elif len(dnsnames) == 1:\n raise CertificateError(f\"hostname {hostname!r} doesn't match {dnsnames[0]!r}\")\n else:\n raise CertificateError(\"no appropriate subjectAltName fields were found\")" }, { "identifier": "Url", "path": ".venv/Lib/site-packages/urllib3/util/url.py", "snippet": "class Url(\n typing.NamedTuple(\n \"Url\",\n [\n (\"scheme\", typing.Optional[str]),\n (\"auth\", typing.Optional[str]),\n (\"host\", typing.Optional[str]),\n (\"port\", typing.Optional[int]),\n (\"path\", typing.Optional[str]),\n (\"query\", typing.Optional[str]),\n (\"fragment\", typing.Optional[str]),\n ],\n )\n):\n \"\"\"\n Data structure for representing an HTTP URL. Used as a return value for\n :func:`parse_url`. 
Both the scheme and host are normalized as they are\n both case-insensitive according to RFC 3986.\n \"\"\"\n\n def __new__( # type: ignore[no-untyped-def]\n cls,\n scheme: str | None = None,\n auth: str | None = None,\n host: str | None = None,\n port: int | None = None,\n path: str | None = None,\n query: str | None = None,\n fragment: str | None = None,\n ):\n if path and not path.startswith(\"/\"):\n path = \"/\" + path\n if scheme is not None:\n scheme = scheme.lower()\n return super().__new__(cls, scheme, auth, host, port, path, query, fragment)\n\n @property\n def hostname(self) -> str | None:\n \"\"\"For backwards-compatibility with urlparse. We're nice like that.\"\"\"\n return self.host\n\n @property\n def request_uri(self) -> str:\n \"\"\"Absolute path including the query string.\"\"\"\n uri = self.path or \"/\"\n\n if self.query is not None:\n uri += \"?\" + self.query\n\n return uri\n\n @property\n def authority(self) -> str | None:\n \"\"\"\n Authority component as defined in RFC 3986 3.2.\n This includes userinfo (auth), host and port.\n\n i.e.\n userinfo@host:port\n \"\"\"\n userinfo = self.auth\n netloc = self.netloc\n if netloc is None or userinfo is None:\n return netloc\n else:\n return f\"{userinfo}@{netloc}\"\n\n @property\n def netloc(self) -> str | None:\n \"\"\"\n Network location including host and port.\n\n If you need the equivalent of urllib.parse's ``netloc``,\n use the ``authority`` property instead.\n \"\"\"\n if self.host is None:\n return None\n if self.port:\n return f\"{self.host}:{self.port}\"\n return self.host\n\n @property\n def url(self) -> str:\n \"\"\"\n Convert self into a url\n\n This function should more or less round-trip with :func:`.parse_url`. The\n returned url may not be exactly the same as the url inputted to\n :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls\n with a blank port will have : removed).\n\n Example:\n\n .. code-block:: python\n\n import urllib3\n\n U = urllib3.util.parse_url(\"https://google.com/mail/\")\n\n print(U.url)\n # \"https://google.com/mail/\"\n\n print( urllib3.util.Url(\"https\", \"username:password\",\n \"host.com\", 80, \"/path\", \"query\", \"fragment\"\n ).url\n )\n # \"https://username:[email protected]:80/path?query#fragment\"\n \"\"\"\n scheme, auth, host, port, path, query, fragment = self\n url = \"\"\n\n # We use \"is not None\" we want things to happen with empty strings (or 0 port)\n if scheme is not None:\n url += scheme + \"://\"\n if auth is not None:\n url += auth + \"@\"\n if host is not None:\n url += host\n if port is not None:\n url += \":\" + str(port)\n if path is not None:\n url += path\n if query is not None:\n url += \"?\" + query\n if fragment is not None:\n url += \"#\" + fragment\n\n return url\n\n def __str__(self) -> str:\n return self.url" } ]
import datetime
import logging
import os
import re
import socket
import sys
import typing
import warnings
import ssl
from http.client import HTTPConnection as _HTTPConnection
from http.client import HTTPException as HTTPException  # noqa: F401
from http.client import ResponseNotReady
from socket import timeout as SocketTimeout
from typing import Literal
from .response import HTTPResponse
from .util.ssl_ import _TYPE_PEER_CERT_RET_DICT
from .util.ssltransport import SSLTransport
from ._collections import HTTPHeaderDict
from .util.response import assert_header_parsing
from .util.timeout import _DEFAULT_TIMEOUT, _TYPE_TIMEOUT, Timeout
from .util.util import to_str
from .util.wait import wait_for_read
from ._base_connection import _TYPE_BODY
from ._base_connection import ProxyConfig as ProxyConfig
from ._base_connection import _ResponseOptions as _ResponseOptions
from ._version import __version__
from .exceptions import (
    ConnectTimeoutError,
    HeaderParsingError,
    NameResolutionError,
    NewConnectionError,
    ProxyError,
    SystemTimeWarning,
)
from .util import SKIP_HEADER, SKIPPABLE_HEADERS, connection, ssl_
from .util.request import body_to_chunks
from .util.ssl_ import assert_fingerprint as _assert_fingerprint
from .util.ssl_ import (
    create_urllib3_context,
    is_ipaddress,
    resolve_cert_reqs,
    resolve_ssl_version,
    ssl_wrap_socket,
)
from .util.ssl_match_hostname import CertificateError, match_hostname
from .util.url import Url
from .response import HTTPResponse
13429
from __future__ import annotations

if typing.TYPE_CHECKING:

try:  # Compiled with SSL?
    BaseSSLError = ssl.SSLError
except (ImportError, AttributeError):
    ssl = None  # type: ignore[assignment]

    class BaseSSLError(BaseException):  # type: ignore[no-redef]
        pass
from __future__ import annotations

if typing.TYPE_CHECKING:

try:  # Compiled with SSL?
    BaseSSLError = ssl.SSLError
except (ImportError, AttributeError):
    ssl = None  # type: ignore[assignment]

    class BaseSSLError(BaseException):  # type: ignore[no-redef]
        pass
from ._base_connection import ProxyConfig as ProxyConfig
8
2023-12-16 04:12:01+00:00
16k
YaoFANGUK/video-subtitle-remover
backend/scenedetect/scene_manager.py
[ { "identifier": "SimpleTableCell", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTableCell(object):\n \"\"\"A table class to create table cells.\n\n Example:\n cell = SimpleTableCell('Hello, world!')\n \"\"\"\n\n def __init__(self, text, header=False):\n \"\"\"Table cell constructor.\n\n Keyword arguments:\n text -- text to be displayed\n header -- flag to indicate this cell is a header cell.\n \"\"\"\n self.text = text\n self.header = header\n\n def __str__(self):\n \"\"\"Return the HTML code for the table cell.\"\"\"\n if self.header:\n return '<th>%s</th>' % (self.text)\n else:\n return '<td>%s</td>' % (self.text)" }, { "identifier": "SimpleTableImage", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTableImage(object):\n \"\"\"A table class to create table cells with an image.\n\n Example:\n cell = SimpleTableImage('images/image_1.jpg')\n \"\"\"\n\n def __init__(self, image_file, width=None, height=None):\n \"\"\"Table cell constructor.\n\n Keyword arguments:\n image_file -- relative filepath to image file to display.\n width -- (optional) width of the image in pixels\n height -- (optional) height of the image in pixels\n \"\"\"\n self.image_file = image_file\n if width:\n self.width = round(width)\n else:\n self.width = width\n if height:\n self.height = round(height)\n else:\n self.height = height\n\n def __str__(self):\n \"\"\"Return the HTML code for the table cell with the image.\"\"\"\n safe_filename = quote(self.image_file)\n output = '<a href=\"%s\" target=\"_blank\">' % (safe_filename)\n output += '<img src=\"%s\"' % (safe_filename)\n if self.height:\n output += ' height=\"%s\"' % (self.height)\n if self.width:\n output += ' width=\"%s\"' % (self.width)\n output += '></a>'\n\n return output" }, { "identifier": "SimpleTableRow", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTableRow(object):\n \"\"\"A table class to create table rows, populated by table cells.\n\n Example:\n # Row from list\n row = SimpleTableRow(['Hello,', 'world!'])\n\n # Row from SimpleTableCell\n cell1 = SimpleTableCell('Hello,')\n cell2 = SimpleTableCell('world!')\n row = SimpleTableRow([cell1, cell2])\n \"\"\"\n\n def __init__(self, cells=None, header=False):\n \"\"\"Table row constructor.\n\n Keyword arguments:\n cells -- iterable of SimpleTableCell (default None)\n header -- flag to indicate this row is a header row.\n if the cells are SimpleTableCell, it is the programmer's\n responsibility to verify whether it was created with the\n header flag set to True.\n \"\"\"\n cells = cells or []\n if isinstance(cells[0], SimpleTableCell):\n self.cells = cells\n else:\n self.cells = [SimpleTableCell(cell, header=header) for cell in cells]\n\n self.header = header\n\n def __str__(self):\n \"\"\"Return the HTML code for the table row and its cells as a string.\"\"\"\n row = []\n\n row.append('<tr>')\n\n for cell in self.cells:\n row.append(str(cell))\n\n row.append('</tr>')\n\n return '\\n'.join(row)\n\n def __iter__(self):\n \"\"\"Iterate through row cells\"\"\"\n for cell in self.cells:\n yield cell\n\n def add_cell(self, cell):\n \"\"\"Add a SimpleTableCell object to the list of cells.\"\"\"\n self.cells.append(cell)\n\n def add_cells(self, cells):\n \"\"\"Add a list of SimpleTableCell objects to the list of cells.\"\"\"\n for cell in cells:\n self.cells.append(cell)" }, { "identifier": "SimpleTable", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTable(object):\n 
\"\"\"A table class to create HTML tables, populated by HTML table rows.\n\n Example:\n # Table from lists\n table = SimpleTable([['Hello,', 'world!'], ['How', 'are', 'you?']])\n\n # Table with header row\n table = SimpleTable([['Hello,', 'world!'], ['How', 'are', 'you?']],\n header_row=['Header1', 'Header2', 'Header3'])\n\n # Table from SimpleTableRow\n rows = SimpleTableRow(['Hello,', 'world!'])\n table = SimpleTable(rows)\n \"\"\"\n\n def __init__(self, rows=None, header_row=None, css_class=None):\n \"\"\"Table constructor.\n\n Keyword arguments:\n rows -- iterable of SimpleTableRow\n header_row -- row that will be displayed at the beginning of the table.\n if this row is SimpleTableRow, it is the programmer's\n responsibility to verify whether it was created with the\n header flag set to True.\n css_class -- table CSS class\n \"\"\"\n rows = rows or []\n if isinstance(rows[0], SimpleTableRow):\n self.rows = rows\n else:\n self.rows = [SimpleTableRow(row) for row in rows]\n\n if header_row is None:\n self.header_row = None\n elif isinstance(header_row, SimpleTableRow):\n self.header_row = header_row\n else:\n self.header_row = SimpleTableRow(header_row, header=True)\n\n self.css_class = css_class\n\n def __str__(self):\n \"\"\"Return the HTML code for the table as a string.\"\"\"\n table = []\n\n if self.css_class:\n table.append('<table class=%s>' % self.css_class)\n else:\n table.append('<table>')\n\n if self.header_row:\n table.append(str(self.header_row))\n\n for row in self.rows:\n table.append(str(row))\n\n table.append('</table>')\n\n return '\\n'.join(table)\n\n def __iter__(self):\n \"\"\"Iterate through table rows\"\"\"\n for row in self.rows:\n yield row\n\n def add_row(self, row):\n \"\"\"Add a SimpleTableRow object to the list of rows.\"\"\"\n self.rows.append(row)\n\n def add_rows(self, rows):\n \"\"\"Add a list of SimpleTableRow objects to the list of rows.\"\"\"\n for row in rows:\n self.rows.append(row)" }, { "identifier": "HTMLPage", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class HTMLPage(object):\n \"\"\"A class to create HTML pages containing CSS and tables.\"\"\"\n\n def __init__(self, tables=None, css=None, encoding=\"utf-8\"):\n \"\"\"HTML page constructor.\n\n Keyword arguments:\n tables -- List of SimpleTable objects\n css -- Cascading Style Sheet specification that is appended before the\n table string\n encoding -- Characters encoding. 
Default: UTF-8\n \"\"\"\n self.tables = tables or []\n self.css = css\n self.encoding = encoding\n\n def __str__(self):\n \"\"\"Return the HTML page as a string.\"\"\"\n page = []\n\n if self.css:\n page.append('<style type=\"text/css\">\\n%s\\n</style>' % self.css)\n\n # Set encoding\n page.append('<meta http-equiv=\"Content-Type\" content=\"text/html;'\n 'charset=%s\">' % self.encoding)\n\n for table in self.tables:\n page.append(str(table))\n page.append('<br />')\n\n return '\\n'.join(page)\n\n def __iter__(self):\n \"\"\"Iterate through tables\"\"\"\n for table in self.tables:\n yield table\n\n def save(self, filename):\n \"\"\"Save HTML page to a file using the proper encoding\"\"\"\n with codecs.open(filename, 'w', self.encoding) as outfile:\n for line in str(self):\n outfile.write(line)\n\n def add_table(self, table):\n \"\"\"Add a SimpleTable to the page list of tables\"\"\"\n self.tables.append(table)" }, { "identifier": "tqdm", "path": "backend/scenedetect/platform.py", "snippet": "class FakeTqdmObject:\nclass FakeTqdmLoggingRedirect:\nclass CommandTooLong(Exception):\nclass Template(string.Template):\n def __init__(self, **kawrgs):\n def update(self, n=1):\n def close(self):\n def set_description(self, desc=None, refresh=True):\n def __init__(self, **kawrgs):\n def __enter__(self):\n def __exit__(self, type, value, traceback):\ndef get_cv2_imwrite_params() -> Dict[str, Union[int, None]]:\n def _get_cv2_param(param_name: str) -> Union[int, None]:\ndef get_file_name(file_path: AnyStr, include_extension=True) -> AnyStr:\ndef get_and_create_path(file_path: AnyStr, output_directory: Optional[AnyStr] = None) -> AnyStr:\ndef init_logger(log_level: int = logging.INFO,\n show_stdout: bool = False,\n log_file: Optional[str] = None):\ndef invoke_command(args: List[str]) -> int:\ndef get_ffmpeg_path() -> Optional[str]:\ndef get_ffmpeg_version() -> Optional[str]:\ndef get_mkvmerge_version() -> Optional[str]:\ndef get_system_version_info() -> str:\n INFO_TEMPLATE = '[PySceneDetect] %(message)s'\n DEBUG_TEMPLATE = '%(levelname)s: %(module)s.%(funcName)s(): %(message)s'" }, { "identifier": "FrameTimecode", "path": "backend/scenedetect/frame_timecode.py", "snippet": "class FrameTimecode:\n \"\"\"Object for frame-based timecodes, using the video framerate to compute back and\n forth between frame number and seconds/timecode.\n\n A timecode is valid only if it complies with one of the following three types/formats:\n\n 1. Timecode as `str` in the form 'HH:MM:SS[.nnn]' (`'01:23:45'` or `'01:23:45.678'`)\n 2. Number of seconds as `float`, or `str` in form 'Ss' or 'S.SSSs' (`'2s'` or `'2.3456s'`)\n 3. Exact number of frames as `int`, or `str` in form NNNNN (`123` or `'123'`)\n \"\"\"\n\n def __init__(self,\n timecode: Union[int, float, str, 'FrameTimecode'] = None,\n fps: Union[int, float, str, 'FrameTimecode'] = None):\n \"\"\"\n Arguments:\n timecode: A frame number (int), number of seconds (float), or timecode (str in\n the form `'HH:MM:SS'` or `'HH:MM:SS.nnn'`).\n fps: The framerate or FrameTimecode to use as a time base for all arithmetic.\n Raises:\n TypeError: Thrown if either `timecode` or `fps` are unsupported types.\n ValueError: Thrown when specifying a negative timecode or framerate.\n \"\"\"\n # The following two properties are what is used to keep track of time\n # in a frame-specific manner. 
Note that once the framerate is set,\n # the value should never be modified (only read if required).\n # TODO(v1.0): Make these actual @properties.\n self.framerate = None\n self.frame_num = None\n\n # Copy constructor. Only the timecode argument is used in this case.\n if isinstance(timecode, FrameTimecode):\n self.framerate = timecode.framerate\n self.frame_num = timecode.frame_num\n if fps is not None:\n raise TypeError('Framerate cannot be overwritten when copying a FrameTimecode.')\n else:\n # Ensure other arguments are consistent with API.\n if fps is None:\n raise TypeError('Framerate (fps) is a required argument.')\n if isinstance(fps, FrameTimecode):\n fps = fps.framerate\n\n # Process the given framerate, if it was not already set.\n if not isinstance(fps, (int, float)):\n raise TypeError('Framerate must be of type int/float.')\n if (isinstance(fps, int) and not fps > 0) or (isinstance(fps, float)\n and not fps >= MAX_FPS_DELTA):\n raise ValueError('Framerate must be positive and greater than zero.')\n self.framerate = float(fps)\n\n # Process the timecode value, storing it as an exact number of frames.\n if isinstance(timecode, str):\n self.frame_num = self._parse_timecode_string(timecode)\n else:\n self.frame_num = self._parse_timecode_number(timecode)\n\n # TODO(v1.0): Add a `frame` property to replace the existing one and deprecate this getter.\n def get_frames(self) -> int:\n \"\"\"Get the current time/position in number of frames. This is the\n equivalent of accessing the self.frame_num property (which, along\n with the specified framerate, forms the base for all of the other\n time measurement calculations, e.g. the :meth:`get_seconds` method).\n\n If using to compare a :class:`FrameTimecode` with a frame number,\n you can do so directly against the object (e.g. ``FrameTimecode(10, 10.0) <= 10``).\n\n Returns:\n int: The current time in frames (the current frame number).\n \"\"\"\n return self.frame_num\n\n # TODO(v1.0): Add a `framerate` property to replace the existing one and deprecate this getter.\n def get_framerate(self) -> float:\n \"\"\"Get Framerate: Returns the framerate used by the FrameTimecode object.\n\n Returns:\n float: Framerate of the current FrameTimecode object, in frames per second.\n \"\"\"\n return self.framerate\n\n def equal_framerate(self, fps) -> bool:\n \"\"\"Equal Framerate: Determines if the passed framerate is equal to that of this object.\n\n Arguments:\n fps: Framerate to compare against within the precision constant defined in this module\n (see :data:`MAX_FPS_DELTA`).\n Returns:\n bool: True if passed fps matches the FrameTimecode object's framerate, False otherwise.\n\n \"\"\"\n return math.fabs(self.framerate - fps) < MAX_FPS_DELTA\n\n # TODO(v1.0): Add a `seconds` property to replace this and deprecate the existing one.\n def get_seconds(self) -> float:\n \"\"\"Get the frame's position in number of seconds.\n\n If using to compare a :class:`FrameTimecode` with a frame number,\n you can do so directly against the object (e.g. 
``FrameTimecode(10, 10.0) <= 1.0``).\n\n Returns:\n float: The current time/position in seconds.\n \"\"\"\n return float(self.frame_num) / self.framerate\n\n # TODO(v1.0): Add a `timecode` property to replace this and deprecate the existing one.\n def get_timecode(self, precision: int = 3, use_rounding: bool = True) -> str:\n \"\"\"Get a formatted timecode string of the form HH:MM:SS[.nnn].\n\n Args:\n precision: The number of decimal places to include in the output ``[.nnn]``.\n use_rounding: Rounds the output to the desired precision. If False, the value\n will be truncated to the specified precision.\n\n Returns:\n str: The current time in the form ``\"HH:MM:SS[.nnn]\"``.\n \"\"\"\n # Compute hours and minutes based off of seconds, and update seconds.\n secs = self.get_seconds()\n base = 60.0 * 60.0\n hrs = int(secs / base)\n secs -= (hrs * base)\n base = 60.0\n mins = int(secs / base)\n secs -= (mins * base)\n # Convert seconds into string based on required precision.\n if precision > 0:\n if use_rounding:\n secs = round(secs, precision)\n msec = format(secs, '.%df' % precision)[-precision:]\n secs = '%02d.%s' % (int(secs), msec)\n else:\n secs = '%02d' % int(round(secs, 0)) if use_rounding else '%02d' % int(secs)\n # Return hours, minutes, and seconds as a formatted timecode string.\n return '%02d:%02d:%s' % (hrs, mins, secs)\n\n # TODO(v1.0): Add a `previous` property to replace the existing one and deprecate this getter.\n def previous_frame(self) -> 'FrameTimecode':\n \"\"\"Return a new FrameTimecode for the previous frame (or 0 if on frame 0).\"\"\"\n new_timecode = FrameTimecode(self)\n new_timecode.frame_num = max(0, new_timecode.frame_num - 1)\n return new_timecode\n\n def _seconds_to_frames(self, seconds: float) -> int:\n \"\"\"Convert the passed value seconds to the nearest number of frames using\n the current FrameTimecode object's FPS (self.framerate).\n\n Returns:\n Integer number of frames the passed number of seconds represents using\n the current FrameTimecode's framerate property.\n \"\"\"\n return round(seconds * self.framerate)\n\n def _parse_timecode_number(self, timecode: Union[int, float]) -> int:\n \"\"\" Parse a timecode number, storing it as the exact number of frames.\n Can be passed as frame number (int), seconds (float)\n\n Raises:\n TypeError, ValueError\n \"\"\"\n # Process the timecode value, storing it as an exact number of frames.\n # Exact number of frames N\n if isinstance(timecode, int):\n if timecode < 0:\n raise ValueError('Timecode frame number must be positive and greater than zero.')\n return timecode\n # Number of seconds S\n elif isinstance(timecode, float):\n if timecode < 0.0:\n raise ValueError('Timecode value must be positive and greater than zero.')\n return self._seconds_to_frames(timecode)\n # FrameTimecode\n elif isinstance(timecode, FrameTimecode):\n return timecode.frame_num\n elif timecode is None:\n raise TypeError('Timecode/frame number must be specified!')\n else:\n raise TypeError('Timecode format/type unrecognized.')\n\n def _parse_timecode_string(self, timecode_string: str) -> int:\n \"\"\"Parses a string based on the three possible forms (in timecode format,\n as an integer number of frames, or floating-point seconds, ending with 's').\n\n Requires that the `framerate` property is set before calling this method.\n Assuming a framerate of 30.0 FPS, the strings '00:05:00.000', '00:05:00',\n '9000', '300s', and '300.0s' are all possible valid values, all representing\n a period of time equal to 5 minutes, 300 seconds, or 9000 
frames (at 30 FPS).\n\n Raises:\n TypeError, ValueError\n \"\"\"\n if self.framerate is None:\n raise TypeError('self.framerate must be set before calling _parse_timecode_string.')\n # Number of seconds S\n if timecode_string.endswith('s'):\n secs = timecode_string[:-1]\n if not secs.replace('.', '').isdigit():\n raise ValueError('All characters in timecode seconds string must be digits.')\n secs = float(secs)\n if secs < 0.0:\n raise ValueError('Timecode seconds value must be positive.')\n return self._seconds_to_frames(secs)\n # Exact number of frames N\n elif timecode_string.isdigit():\n timecode = int(timecode_string)\n if timecode < 0:\n raise ValueError('Timecode frame number must be positive.')\n return timecode\n # Standard timecode in string format 'HH:MM:SS[.nnn]'\n else:\n tc_val = timecode_string.split(':')\n if not (len(tc_val) == 3 and tc_val[0].isdigit() and tc_val[1].isdigit()\n and tc_val[2].replace('.', '').isdigit()):\n raise ValueError('Unrecognized or improperly formatted timecode string.')\n hrs, mins = int(tc_val[0]), int(tc_val[1])\n secs = float(tc_val[2]) if '.' in tc_val[2] else int(tc_val[2])\n if not (hrs >= 0 and mins >= 0 and secs >= 0 and mins < 60 and secs < 60):\n raise ValueError('Invalid timecode range (values outside allowed range).')\n secs += (((hrs * 60.0) + mins) * 60.0)\n return self._seconds_to_frames(secs)\n\n def __iadd__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n if isinstance(other, int):\n self.frame_num += other\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n self.frame_num += other.frame_num\n else:\n raise ValueError('FrameTimecode instances require equal framerate for addition.')\n # Check if value to add is in number of seconds.\n elif isinstance(other, float):\n self.frame_num += self._seconds_to_frames(other)\n elif isinstance(other, str):\n self.frame_num += self._parse_timecode_string(other)\n else:\n raise TypeError('Unsupported type for performing addition with FrameTimecode.')\n if self.frame_num < 0: # Required to allow adding negative seconds/frames.\n self.frame_num = 0\n return self\n\n def __add__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n to_return = FrameTimecode(timecode=self)\n to_return += other\n return to_return\n\n def __isub__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n if isinstance(other, int):\n self.frame_num -= other\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n self.frame_num -= other.frame_num\n else:\n raise ValueError('FrameTimecode instances require equal framerate for subtraction.')\n # Check if value to add is in number of seconds.\n elif isinstance(other, float):\n self.frame_num -= self._seconds_to_frames(other)\n elif isinstance(other, str):\n self.frame_num -= self._parse_timecode_string(other)\n else:\n raise TypeError('Unsupported type for performing subtraction with FrameTimecode: %s' %\n type(other))\n if self.frame_num < 0:\n self.frame_num = 0\n return self\n\n def __sub__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n to_return = FrameTimecode(timecode=self)\n to_return -= other\n return to_return\n\n def __eq__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n if isinstance(other, int):\n return self.frame_num == other\n elif isinstance(other, float):\n return self.get_seconds() == other\n elif isinstance(other, str):\n return self.frame_num == 
self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num == other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n elif other is None:\n return False\n else:\n raise TypeError('Unsupported type for performing == with FrameTimecode: %s' %\n type(other))\n\n def __ne__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n return not self == other\n\n def __lt__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num < other\n elif isinstance(other, float):\n return self.get_seconds() < other\n elif isinstance(other, str):\n return self.frame_num < self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num < other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing < with FrameTimecode: %s' %\n type(other))\n\n def __le__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num <= other\n elif isinstance(other, float):\n return self.get_seconds() <= other\n elif isinstance(other, str):\n return self.frame_num <= self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num <= other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing <= with FrameTimecode: %s' %\n type(other))\n\n def __gt__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num > other\n elif isinstance(other, float):\n return self.get_seconds() > other\n elif isinstance(other, str):\n return self.frame_num > self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num > other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing > with FrameTimecode: %s' %\n type(other))\n\n def __ge__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num >= other\n elif isinstance(other, float):\n return self.get_seconds() >= other\n elif isinstance(other, str):\n return self.frame_num >= self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num >= other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing >= with FrameTimecode: %s' %\n type(other))\n\n # TODO(v1.0): __int__ and __float__ should be removed. 
Mark as deprecated, and indicate\n # need to use relevant property instead.\n\n def __int__(self) -> int:\n return self.frame_num\n\n def __float__(self) -> float:\n return self.get_seconds()\n\n def __str__(self) -> str:\n return self.get_timecode()\n\n def __repr__(self) -> str:\n return '%s [frame=%d, fps=%.3f]' % (self.get_timecode(), self.frame_num, self.framerate)\n\n def __hash__(self) -> int:\n return self.frame_num" }, { "identifier": "VideoStream", "path": "backend/scenedetect/video_stream.py", "snippet": "class VideoStream(ABC):\n \"\"\" Interface which all video backends must implement. \"\"\"\n\n #\n # Default Implementations\n #\n\n @property\n def base_timecode(self) -> FrameTimecode:\n \"\"\"FrameTimecode object to use as a time base.\"\"\"\n return FrameTimecode(timecode=0, fps=self.frame_rate)\n\n #\n # Abstract Static Methods\n #\n\n @staticmethod\n @abstractmethod\n def BACKEND_NAME() -> str:\n \"\"\"Unique name used to identify this backend. Should be a static property in derived\n classes (`BACKEND_NAME = 'backend_identifier'`).\"\"\"\n raise NotImplementedError\n\n #\n # Abstract Properties\n #\n\n @property\n @abstractmethod\n def path(self) -> Union[bytes, str]:\n \"\"\"Video or device path.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def name(self) -> Union[bytes, str]:\n \"\"\"Name of the video, without extension, or device.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def is_seekable(self) -> bool:\n \"\"\"True if seek() is allowed, False otherwise.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def frame_rate(self) -> float:\n \"\"\"Frame rate in frames/sec.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def duration(self) -> Optional[FrameTimecode]:\n \"\"\"Duration of the stream as a FrameTimecode, or None if non terminating.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def frame_size(self) -> Tuple[int, int]:\n \"\"\"Size of each video frame in pixels as a tuple of (width, height).\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def aspect_ratio(self) -> float:\n \"\"\"Pixel aspect ratio as a float (1.0 represents square pixels).\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def position(self) -> FrameTimecode:\n \"\"\"Current position within stream as FrameTimecode.\n\n This can be interpreted as presentation time stamp, thus frame 1 corresponds\n to the presentation time 0. Returns 0 even if `frame_number` is 1.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def position_ms(self) -> float:\n \"\"\"Current position within stream as a float of the presentation time in\n milliseconds. The first frame has a PTS of 0.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def frame_number(self) -> int:\n \"\"\"Current position within stream as the frame number.\n\n Will return 0 until the first frame is `read`.\"\"\"\n raise NotImplementedError\n\n #\n # Abstract Methods\n #\n\n @abstractmethod\n def read(self, decode: bool = True, advance: bool = True) -> Union[ndarray, bool]:\n \"\"\"Read and decode the next frame as a numpy.ndarray. Returns False when video ends.\n\n Arguments:\n decode: Decode and return the frame.\n advance: Seek to the next frame. 
If False, will return the current (last) frame.\n\n Returns:\n If decode = True, the decoded frame (numpy.ndarray), or False (bool) if end of video.\n If decode = False, a bool indicating if advancing to the the next frame succeeded.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def reset(self) -> None:\n \"\"\" Close and re-open the VideoStream (equivalent to seeking back to beginning). \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def seek(self, target: Union[FrameTimecode, float, int]) -> None:\n \"\"\"Seek to the given timecode. If given as a frame number, represents the current seek\n pointer (e.g. if seeking to 0, the next frame decoded will be the first frame of the video).\n\n For 1-based indices (first frame is frame #1), the target frame number needs to be converted\n to 0-based by subtracting one. For example, if we want to seek to the first frame, we call\n seek(0) followed by read(). If we want to seek to the 5th frame, we call seek(4) followed\n by read(), at which point frame_number will be 5.\n\n May not be supported on all backend types or inputs (e.g. cameras).\n\n Arguments:\n target: Target position in video stream to seek to.\n If float, interpreted as time in seconds.\n If int, interpreted as frame number.\n Raises:\n SeekError: An error occurs while seeking, or seeking is not supported.\n ValueError: `target` is not a valid value (i.e. it is negative).\n \"\"\"\n raise NotImplementedError" }, { "identifier": "SceneDetector", "path": "backend/scenedetect/scene_detector.py", "snippet": "class SceneDetector:\n \"\"\" Base class to inherit from when implementing a scene detection algorithm.\n\n This API is not yet stable and subject to change.\n\n This represents a \"dense\" scene detector, which returns a list of frames where\n the next scene/shot begins in a video.\n\n Also see the implemented scene detectors in the scenedetect.detectors module\n to get an idea of how a particular detector can be created.\n \"\"\"\n # TODO(v0.7): Make this a proper abstract base class.\n\n stats_manager: Optional[StatsManager] = None\n \"\"\"Optional :class:`StatsManager <scenedetect.stats_manager.StatsManager>` to\n use for caching frame metrics to and from.\"\"\"\n\n # TODO(v1.0): Remove - this is a rarely used case for what is now a neglegible performance gain.\n def is_processing_required(self, frame_num: int) -> bool:\n \"\"\"[DEPRECATED] DO NOT USE\n\n Test if all calculations for a given frame are already done.\n\n Returns:\n False if the SceneDetector has assigned _metric_keys, and the\n stats_manager property is set to a valid StatsManager object containing\n the required frame metrics/calculations for the given frame - thus, not\n needing the frame to perform scene detection.\n\n True otherwise (i.e. 
the frame_img passed to process_frame is required\n to be passed to process_frame for the given frame_num).\n \"\"\"\n metric_keys = self.get_metrics()\n return not metric_keys or not (self.stats_manager is not None\n and self.stats_manager.metrics_exist(frame_num, metric_keys))\n\n def stats_manager_required(self) -> bool:\n \"\"\"Stats Manager Required: Prototype indicating if detector requires stats.\n\n Returns:\n True if a StatsManager is required for the detector, False otherwise.\n \"\"\"\n return False\n\n def get_metrics(self) -> List[str]:\n \"\"\"Get Metrics: Get a list of all metric names/keys used by the detector.\n\n Returns:\n List of strings of frame metric key names that will be used by\n the detector when a StatsManager is passed to process_frame.\n \"\"\"\n return []\n\n def process_frame(self, frame_num: int, frame_img: Optional[numpy.ndarray]) -> List[int]:\n \"\"\"Process Frame: Computes/stores metrics and detects any scene changes.\n\n Prototype method, no actual detection.\n\n Returns:\n List of frame numbers of cuts to be added to the cutting list.\n \"\"\"\n return []\n\n def post_process(self, frame_num: int) -> List[int]:\n \"\"\"Post Process: Performs any processing after the last frame has been read.\n\n Prototype method, no actual detection.\n\n Returns:\n List of frame numbers of cuts to be added to the cutting list.\n \"\"\"\n return []\n\n @property\n def event_buffer_length(self) -> int:\n \"\"\"The amount of frames a given event can be buffered for, in time. Represents maximum\n amount any event can be behind `frame_number` in the result of :meth:`process_frame`.\n \"\"\"\n return 0" }, { "identifier": "SparseSceneDetector", "path": "backend/scenedetect/scene_detector.py", "snippet": "class SparseSceneDetector(SceneDetector):\n \"\"\"Base class to inherit from when implementing a sparse scene detection algorithm.\n\n This class will be removed in v1.0 and should not be used.\n\n Unlike dense detectors, sparse detectors scene_detect \"events\" and return a *pair* of frames,\n as opposed to just a single cut.\n\n An example of a SparseSceneDetector is the MotionDetector.\n \"\"\"\n\n def process_frame(self, frame_num: int, frame_img: numpy.ndarray) -> List[Tuple[int, int]]:\n \"\"\"Process Frame: Computes/stores metrics and detects any scene changes.\n\n Prototype method, no actual detection.\n\n Returns:\n List of frame pairs representing individual scenes\n to be added to the output scene list directly.\n \"\"\"\n return []\n\n def post_process(self, frame_num: int) -> List[Tuple[int, int]]:\n \"\"\"Post Process: Performs any processing after the last frame has been read.\n\n Prototype method, no actual detection.\n\n Returns:\n List of frame pairs representing individual scenes\n to be added to the output scene list directly.\n \"\"\"\n return []" }, { "identifier": "StatsManager", "path": "backend/scenedetect/stats_manager.py", "snippet": "class StatsManager:\n \"\"\"Provides a key-value store for frame metrics/calculations which can be used\n for two-pass detection algorithms, as well as saving stats to a CSV file.\n\n Analyzing a statistics CSV file is also very useful for finding the optimal\n algorithm parameters for certain detection methods. Additionally, the data\n may be plotted by a graphing module (e.g. matplotlib) by obtaining the\n metric of interest for a series of frames by iteratively calling get_metrics(),\n after having called the detect_scenes(...) 
method on the SceneManager object\n which owns the given StatsManager instance.\n\n Only metrics consisting of `float` or `int` should be used currently.\n \"\"\"\n\n def __init__(self, base_timecode: FrameTimecode = None):\n \"\"\"Initialize a new StatsManager.\n\n Arguments:\n base_timecode: Timecode associated with this object. Must not be None (default value\n will be removed in a future release).\n \"\"\"\n # Frame metrics is a dict of frame (int): metric_dict (Dict[str, float])\n # of each frame metric key and the value it represents (usually float).\n self._frame_metrics: Dict[FrameTimecode, Dict[str, float]] = dict()\n self._registered_metrics: Set[str] = set() # Set of frame metric keys.\n self._loaded_metrics: Set[str] = set() # Metric keys loaded from stats file.\n self._metrics_updated: bool = False # Flag indicating if metrics require saving.\n self._base_timecode: Optional[FrameTimecode] = base_timecode # Used for timing calculations.\n\n def register_metrics(self, metric_keys: Iterable[str]) -> None:\n \"\"\"Register a list of metric keys that will be used by the detector.\n\n Used to ensure that multiple detector keys don't overlap.\n\n Raises:\n FrameMetricRegistered: A particular metric_key has already been registered/added\n to the StatsManager. Only if the StatsManager is being used for read-only\n access (i.e. all frames in the video have already been processed for the given\n metric_key in the exception) is this behavior desirable.\n \"\"\"\n for metric_key in metric_keys:\n if metric_key not in self._registered_metrics:\n self._registered_metrics.add(metric_key)\n else:\n raise FrameMetricRegistered(metric_key)\n\n # TODO(v1.0): Change frame_number to a FrameTimecode now that it is just a hash and will\n # be required for VFR support.\n def get_metrics(self, frame_number: int, metric_keys: Iterable[str]) -> List[Any]:\n \"\"\"Return the requested statistics/metrics for a given frame.\n\n Arguments:\n frame_number (int): Frame number to retrieve metrics for.\n metric_keys (List[str]): A list of metric keys to look up.\n\n Returns:\n A list containing the requested frame metrics for the given frame number\n in the same order as the input list of metric keys. 
If a metric could\n not be found, None is returned for that particular metric.\n \"\"\"\n return [self._get_metric(frame_number, metric_key) for metric_key in metric_keys]\n\n def set_metrics(self, frame_number: int, metric_kv_dict: Dict[str, Any]) -> None:\n \"\"\" Set Metrics: Sets the provided statistics/metrics for a given frame.\n\n Arguments:\n frame_number: Frame number to retrieve metrics for.\n metric_kv_dict: A dict mapping metric keys to the\n respective integer/floating-point metric values to set.\n \"\"\"\n for metric_key in metric_kv_dict:\n self._set_metric(frame_number, metric_key, metric_kv_dict[metric_key])\n\n def metrics_exist(self, frame_number: int, metric_keys: Iterable[str]) -> bool:\n \"\"\" Metrics Exist: Checks if the given metrics/stats exist for the given frame.\n\n Returns:\n bool: True if the given metric keys exist for the frame, False otherwise.\n \"\"\"\n return all([self._metric_exists(frame_number, metric_key) for metric_key in metric_keys])\n\n def is_save_required(self) -> bool:\n \"\"\" Is Save Required: Checks if the stats have been updated since loading.\n\n Returns:\n bool: True if there are frame metrics/statistics not yet written to disk,\n False otherwise.\n \"\"\"\n return self._metrics_updated\n\n def save_to_csv(self,\n csv_file: Union[str, bytes, TextIO],\n base_timecode: Optional[FrameTimecode] = None,\n force_save=True) -> None:\n \"\"\" Save To CSV: Saves all frame metrics stored in the StatsManager to a CSV file.\n\n Arguments:\n csv_file: A file handle opened in write mode (e.g. open('...', 'w')) or a path as str.\n base_timecode: [DEPRECATED] DO NOT USE. For backwards compatibility.\n force_save: If True, writes metrics out even if an update is not required.\n\n Raises:\n OSError: If `path` cannot be opened or a write failure occurs.\n \"\"\"\n # TODO(v0.7): Replace with DeprecationWarning that `base_timecode` will be removed in v0.8.\n if base_timecode is not None:\n logger.error('base_timecode is deprecated.')\n\n # Ensure we need to write to the file, and that we have data to do so with.\n if not ((self.is_save_required() or force_save) and self._registered_metrics\n and self._frame_metrics):\n logger.info(\"No metrics to save.\")\n return\n\n assert self._base_timecode is not None\n\n # If we get a path instead of an open file handle, recursively call ourselves\n # again but with file handle instead of path.\n if isinstance(csv_file, (str, bytes)):\n with open(csv_file, 'w') as file:\n self.save_to_csv(csv_file=file, force_save=force_save)\n return\n\n csv_writer = csv.writer(csv_file, lineterminator='\\n')\n metric_keys = sorted(list(self._registered_metrics.union(self._loaded_metrics)))\n csv_writer.writerow([COLUMN_NAME_FRAME_NUMBER, COLUMN_NAME_TIMECODE] + metric_keys)\n frame_keys = sorted(self._frame_metrics.keys())\n logger.info(\"Writing %d frames to CSV...\", len(frame_keys))\n for frame_key in frame_keys:\n frame_timecode = self._base_timecode + frame_key\n csv_writer.writerow(\n [frame_timecode.get_frames() +\n 1, frame_timecode.get_timecode()] +\n [str(metric) for metric in self.get_metrics(frame_key, metric_keys)])\n\n @staticmethod\n def valid_header(row: List[str]) -> bool:\n \"\"\"Check that the given CSV row is a valid header for a statsfile.\n\n Arguments:\n row: A row decoded from the CSV reader.\n\n Returns:\n True if `row` is a valid statsfile header, False otherwise.\n \"\"\"\n if not row or not len(row) >= 2:\n return False\n if row[0] != COLUMN_NAME_FRAME_NUMBER or row[1] != COLUMN_NAME_TIMECODE:\n return 
False\n return True\n\n # TODO(v1.0): Remove.\n def load_from_csv(self, csv_file: Union[str, bytes, TextIO]) -> Optional[int]:\n \"\"\"[DEPRECATED] DO NOT USE\n\n Load all metrics stored in a CSV file into the StatsManager instance. Will be removed in a\n future release after becoming a no-op.\n\n Arguments:\n csv_file: A file handle opened in read mode (e.g. open('...', 'r')) or a path as str.\n\n Returns:\n int or None: Number of frames/rows read from the CSV file, or None if the\n input file was blank or could not be found.\n\n Raises:\n StatsFileCorrupt: Stats file is corrupt and can't be loaded, or wrong file\n was specified.\n \"\"\"\n # TODO: Make this an error, then make load_from_csv() a no-op, and finally, remove it.\n logger.warning(\"load_from_csv() is deprecated and will be removed in a future release.\")\n\n # If we get a path instead of an open file handle, check that it exists, and if so,\n # recursively call ourselves again but with file set instead of path.\n if isinstance(csv_file, (str, bytes)):\n if os.path.exists(csv_file):\n with open(csv_file, 'r') as file:\n return self.load_from_csv(csv_file=file)\n # Path doesn't exist.\n return None\n\n # If we get here, file is a valid file handle in read-only text mode.\n csv_reader = csv.reader(csv_file, lineterminator='\\n')\n num_cols = None\n num_metrics = None\n num_frames = None\n # First Row: Frame Num, Timecode, [metrics...]\n try:\n row = next(csv_reader)\n # Backwards compatibility for previous versions of statsfile\n # which included an additional header row.\n if not self.valid_header(row):\n row = next(csv_reader)\n except StopIteration:\n # If the file is blank or we couldn't decode anything, assume the file was empty.\n return None\n if not self.valid_header(row):\n raise StatsFileCorrupt()\n num_cols = len(row)\n num_metrics = num_cols - 2\n if not num_metrics > 0:\n raise StatsFileCorrupt('No metrics defined in CSV file.')\n self._loaded_metrics = row[2:]\n num_frames = 0\n for row in csv_reader:\n metric_dict = {}\n if not len(row) == num_cols:\n raise StatsFileCorrupt('Wrong number of columns detected in stats file row.')\n for i, metric_str in enumerate(row[2:]):\n if metric_str and metric_str != 'None':\n try:\n metric_dict[self._loaded_metrics[i]] = float(metric_str)\n except ValueError:\n raise StatsFileCorrupt('Corrupted value in stats file: %s' %\n metric_str) from ValueError\n frame_number = int(row[0])\n # Switch from 1-based to 0-based frame numbers.\n if frame_number > 0:\n frame_number -= 1\n self.set_metrics(frame_number, metric_dict)\n num_frames += 1\n logger.info('Loaded %d metrics for %d frames.', num_metrics, num_frames)\n self._metrics_updated = False\n return num_frames\n\n def _get_metric(self, frame_number: int, metric_key: str) -> Optional[Any]:\n if self._metric_exists(frame_number, metric_key):\n return self._frame_metrics[frame_number][metric_key]\n return None\n\n def _set_metric(self, frame_number: int, metric_key: str, metric_value: Any) -> None:\n self._metrics_updated = True\n if not frame_number in self._frame_metrics:\n self._frame_metrics[frame_number] = dict()\n self._frame_metrics[frame_number][metric_key] = metric_value\n\n def _metric_exists(self, frame_number: int, metric_key: str) -> bool:\n return (frame_number in self._frame_metrics\n and metric_key in self._frame_metrics[frame_number])" }, { "identifier": "FrameMetricRegistered", "path": "backend/scenedetect/stats_manager.py", "snippet": "class FrameMetricRegistered(Exception):\n \"\"\" Raised when attempting to 
register a frame metric key which has\n already been registered. \"\"\"\n\n def __init__(self,\n metric_key: str,\n message: str = \"Attempted to re-register frame metric key.\"):\n super().__init__(message)\n self.metric_key = metric_key" } ]
import csv import threading import queue import logging import math import sys import cv2 import numpy as np from enum import Enum from typing import Iterable, List, Tuple, Optional, Dict, Callable, Union, TextIO from backend.scenedetect._thirdparty.simpletable import (SimpleTableCell, SimpleTableImage, SimpleTableRow, SimpleTable, HTMLPage) from backend.scenedetect.platform import (tqdm, get_and_create_path, get_cv2_imwrite_params, Template) from backend.scenedetect.frame_timecode import FrameTimecode from backend.scenedetect.video_stream import VideoStream from backend.scenedetect.scene_detector import SceneDetector, SparseSceneDetector from backend.scenedetect.stats_manager import StatsManager, FrameMetricRegistered
token_num: 14,390
'%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) def write_scene_list_html(output_html_filename, scene_list, cut_list=None, css=None, css_class='mytable', image_filenames=None, image_width=None, image_height=None): """Writes the given list of scenes to an output file handle in html format. Arguments: output_html_filename: filename of output html file scene_list: List of pairs of FrameTimecodes denoting each scene's start/end FrameTimecode. cut_list: Optional list of FrameTimecode objects denoting the cut list (i.e. the frames in the video that need to be split to generate individual scenes). If not passed, the start times of each scene (besides the 0th scene) is used instead. css: String containing all the css information for the resulting html page. css_class: String containing the named css class image_filenames: dict where key i contains a list with n elements (filenames of the n saved images from that scene) image_width: Optional desired width of images in table in pixels image_height: Optional desired height of images in table in pixels """ if not css: css = """ table.mytable { font-family: times; font-size:12px; color:#000000; border-width: 1px; border-color: #eeeeee; border-collapse: collapse; background-color: #ffffff; width=100%; max-width:550px; table-layout:fixed; } table.mytable th { border-width: 1px; padding: 8px; border-style: solid; border-color: #eeeeee; background-color: #e6eed6; color:#000000; } table.mytable td { border-width: 1px; padding: 8px; border-style: solid; border-color: #eeeeee; } #code { display:inline; font-family: courier; color: #3d9400; } #string { display:inline; font-weight: bold; } """ # Output Timecode list timecode_table = SimpleTable( [["Timecode List:"] + (cut_list if cut_list else [start.get_timecode() for start, _ in scene_list[1:]])], css_class=css_class) # Output list of scenes header_row = [ "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame", "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)", "Length (seconds)" ] for i, (start, end) in enumerate(scene_list): duration = end - start row = SimpleTableRow([ '%d' % (i + 1), '%d' % (start.get_frames() + 1), start.get_timecode(), '%.3f' % start.get_seconds(), '%d' % end.get_frames(), end.get_timecode(), '%.3f' % end.get_seconds(), '%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) if image_filenames: for image in image_filenames[i]: row.add_cell( SimpleTableCell( SimpleTableImage(image, width=image_width, height=image_height))) if i == 0: scene_table = SimpleTable(rows=[row], header_row=header_row, css_class=css_class) else: scene_table.add_row(row=row) # Write html file page = HTMLPage() page.add_table(timecode_table) page.add_table(scene_table) page.css = css page.save(output_html_filename) # # TODO(v1.0): Refactor to take a SceneList object; consider moving this and save scene list # to a better spot, or just move them to scene_list.py. # def save_images(scene_list: List[Tuple[FrameTimecode, FrameTimecode]],
# -*- coding: utf-8 -*- # # PySceneDetect: Python-Based Video Scene Detector # ------------------------------------------------------------------- # [ Site: https://scenedetect.com ] # [ Docs: https://scenedetect.com/docs/ ] # [ Github: https://github.com/Breakthrough/PySceneDetect/ ] # # Copyright (C) 2014-2023 Brandon Castellano <http://www.bcastell.com>. # PySceneDetect is licensed under the BSD 3-Clause License; see the # included LICENSE file, or visit one of the above pages for details. # """``scenedetect.scene_manager`` Module This module implements :class:`SceneManager`, coordinates running a :mod:`SceneDetector <scenedetect.detectors>` over the frames of a video (:mod:`VideoStream <scenedetect.video_stream>`). Video decoding is done in a separate thread to improve performance. This module also contains other helper functions (e.g. :func:`save_images`) which can be used to process the resulting scene list. =============================================================== Usage =============================================================== The following example shows basic usage of a :class:`SceneManager`: .. code:: python from scenedetect import open_video, SceneManager, ContentDetector video = open_video(video_path) scene_manager = SceneManager() scene_manager.add_detector(ContentDetector()) # Detect all scenes in video from current position to end. scene_manager.detect_scenes(video) # `get_scene_list` returns a list of start/end timecode pairs # for each scene that was found. scenes = scene_manager.get_scene_list() An optional callback can also be invoked on each detected scene, for example: .. code:: python from scenedetect import open_video, SceneManager, ContentDetector # Callback to invoke on the first frame of every new scene detection. def on_new_scene(frame_img: numpy.ndarray, frame_num: int): print("New scene found at frame %d." % frame_num) video = open_video(test_video_file) scene_manager = SceneManager() scene_manager.add_detector(ContentDetector()) scene_manager.detect_scenes(video=video, callback=on_new_scene) To use a `SceneManager` with a webcam/device or existing `cv2.VideoCapture` device, use the :class:`VideoCaptureAdapter <scenedetect.backends.opencv.VideoCaptureAdapter>` instead of `open_video`. ======================================================================= Storing Per-Frame Statistics ======================================================================= `SceneManager` can use an optional :class:`StatsManager <scenedetect.stats_manager.StatsManager>` to save frame statistics to disk: .. code:: python from scenedetect import open_video, ContentDetector, SceneManager, StatsManager video = open_video(test_video_file) scene_manager = SceneManager(stats_manager=StatsManager()) scene_manager.add_detector(ContentDetector()) scene_manager.detect_scenes(video=video) scene_list = scene_manager.get_scene_list() print_scenes(scene_list=scene_list) # Save per-frame statistics to disk. scene_manager.stats_manager.save_to_csv(csv_file=STATS_FILE_PATH) The statsfile can be used to find a better threshold for certain inputs, or perform statistical analysis of the video. """ logger = logging.getLogger('pyscenedetect') # TODO: This value can and should be tuned for performance improvements as much as possible, # until accuracy falls, on a large enough dataset. This has yet to be done, but the current # value doesn't seem to have caused any issues at least. 
DEFAULT_MIN_WIDTH: int = 256 """The default minimum width a frame will be downscaled to when calculating a downscale factor.""" MAX_FRAME_QUEUE_LENGTH: int = 4 """Maximum number of decoded frames which can be buffered while waiting to be processed.""" PROGRESS_BAR_DESCRIPTION = 'Detected: %d | Progress' """Template to use for progress bar.""" class Interpolation(Enum): """Interpolation method used for image resizing. Based on constants defined in OpenCV.""" NEAREST = cv2.INTER_NEAREST """Nearest neighbor interpolation.""" LINEAR = cv2.INTER_LINEAR """Bilinear interpolation.""" CUBIC = cv2.INTER_CUBIC """Bicubic interpolation.""" AREA = cv2.INTER_AREA """Pixel area relation resampling. Provides moire'-free downscaling.""" LANCZOS4 = cv2.INTER_LANCZOS4 """Lanczos interpolation over 8x8 neighborhood.""" def compute_downscale_factor(frame_width: int, effective_width: int = DEFAULT_MIN_WIDTH) -> int: """Get the optimal default downscale factor based on a video's resolution (currently only the width in pixels is considered). The resulting effective width of the video will be between frame_width and 1.5 * frame_width pixels (e.g. if frame_width is 200, the range of effective widths will be between 200 and 300). Arguments: frame_width: Actual width of the video frame in pixels. effective_width: Desired minimum width in pixels. Returns: int: The default downscale factor to use to achieve at least the target effective_width. """ assert not (frame_width < 1 or effective_width < 1) if frame_width < effective_width: return 1 return frame_width // effective_width def get_scenes_from_cuts( cut_list: Iterable[FrameTimecode], start_pos: Union[int, FrameTimecode], end_pos: Union[int, FrameTimecode], base_timecode: Optional[FrameTimecode] = None, ) -> List[Tuple[FrameTimecode, FrameTimecode]]: """Returns a list of tuples of start/end FrameTimecodes for each scene based on a list of detected scene cuts/breaks. This function is called when using the :meth:`SceneManager.get_scene_list` method. The scene list is generated from a cutting list (:meth:`SceneManager.get_cut_list`), noting that each scene is contiguous, starting from the first to last frame of the input. If `cut_list` is empty, the resulting scene will span from `start_pos` to `end_pos`. Arguments: cut_list: List of FrameTimecode objects where scene cuts/breaks occur. base_timecode: The base_timecode of which all FrameTimecodes in the cut_list are based on. num_frames: The number of frames, or FrameTimecode representing duration, of the video that was processed (used to generate last scene's end time). start_frame: The start frame or FrameTimecode of the cut list. Used to generate the first scene's start time. base_timecode: [DEPRECATED] DO NOT USE. For backwards compatibility only. Returns: List of tuples in the form (start_time, end_time), where both start_time and end_time are FrameTimecode objects representing the exact time/frame where each scene occupies based on the input cut_list. """ # TODO(v0.7): Use the warnings module to turn this into a warning. if base_timecode is not None: logger.error('`base_timecode` argument is deprecated has no effect.') # Scene list, where scenes are tuples of (Start FrameTimecode, End FrameTimecode). scene_list = [] if not cut_list: scene_list.append((start_pos, end_pos)) return scene_list # Initialize last_cut to the first frame we processed,as it will be # the start timecode for the first scene in the list. 
last_cut = start_pos for cut in cut_list: scene_list.append((last_cut, cut)) last_cut = cut # Last scene is from last cut to end of video. scene_list.append((last_cut, end_pos)) return scene_list def write_scene_list(output_csv_file: TextIO, scene_list: Iterable[Tuple[FrameTimecode, FrameTimecode]], include_cut_list: bool = True, cut_list: Optional[Iterable[FrameTimecode]] = None) -> None: """Writes the given list of scenes to an output file handle in CSV format. Arguments: output_csv_file: Handle to open file in write mode. scene_list: List of pairs of FrameTimecodes denoting each scene's start/end FrameTimecode. include_cut_list: Bool indicating if the first row should include the timecodes where each scene starts. Should be set to False if RFC 4180 compliant CSV output is required. cut_list: Optional list of FrameTimecode objects denoting the cut list (i.e. the frames in the video that need to be split to generate individual scenes). If not specified, the cut list is generated using the start times of each scene following the first one. """ csv_writer = csv.writer(output_csv_file, lineterminator='\n') # If required, output the cutting list as the first row (i.e. before the header row). if include_cut_list: csv_writer.writerow( ["Timecode List:"] + cut_list if cut_list else [start.get_timecode() for start, _ in scene_list[1:]]) csv_writer.writerow([ "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame", "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)", "Length (seconds)" ]) for i, (start, end) in enumerate(scene_list): duration = end - start csv_writer.writerow([ '%d' % (i + 1), '%d' % (start.get_frames() + 1), start.get_timecode(), '%.3f' % start.get_seconds(), '%d' % end.get_frames(), end.get_timecode(), '%.3f' % end.get_seconds(), '%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) def write_scene_list_html(output_html_filename, scene_list, cut_list=None, css=None, css_class='mytable', image_filenames=None, image_width=None, image_height=None): """Writes the given list of scenes to an output file handle in html format. Arguments: output_html_filename: filename of output html file scene_list: List of pairs of FrameTimecodes denoting each scene's start/end FrameTimecode. cut_list: Optional list of FrameTimecode objects denoting the cut list (i.e. the frames in the video that need to be split to generate individual scenes). If not passed, the start times of each scene (besides the 0th scene) is used instead. css: String containing all the css information for the resulting html page. 
css_class: String containing the named css class image_filenames: dict where key i contains a list with n elements (filenames of the n saved images from that scene) image_width: Optional desired width of images in table in pixels image_height: Optional desired height of images in table in pixels """ if not css: css = """ table.mytable { font-family: times; font-size:12px; color:#000000; border-width: 1px; border-color: #eeeeee; border-collapse: collapse; background-color: #ffffff; width=100%; max-width:550px; table-layout:fixed; } table.mytable th { border-width: 1px; padding: 8px; border-style: solid; border-color: #eeeeee; background-color: #e6eed6; color:#000000; } table.mytable td { border-width: 1px; padding: 8px; border-style: solid; border-color: #eeeeee; } #code { display:inline; font-family: courier; color: #3d9400; } #string { display:inline; font-weight: bold; } """ # Output Timecode list timecode_table = SimpleTable( [["Timecode List:"] + (cut_list if cut_list else [start.get_timecode() for start, _ in scene_list[1:]])], css_class=css_class) # Output list of scenes header_row = [ "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame", "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)", "Length (seconds)" ] for i, (start, end) in enumerate(scene_list): duration = end - start row = SimpleTableRow([ '%d' % (i + 1), '%d' % (start.get_frames() + 1), start.get_timecode(), '%.3f' % start.get_seconds(), '%d' % end.get_frames(), end.get_timecode(), '%.3f' % end.get_seconds(), '%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) if image_filenames: for image in image_filenames[i]: row.add_cell( SimpleTableCell( SimpleTableImage(image, width=image_width, height=image_height))) if i == 0: scene_table = SimpleTable(rows=[row], header_row=header_row, css_class=css_class) else: scene_table.add_row(row=row) # Write html file page = HTMLPage() page.add_table(timecode_table) page.add_table(scene_table) page.css = css page.save(output_html_filename) # # TODO(v1.0): Refactor to take a SceneList object; consider moving this and save scene list # to a better spot, or just move them to scene_list.py. # def save_images(scene_list: List[Tuple[FrameTimecode, FrameTimecode]],
next_line: video: VideoStream,
gold_snippet_index: 7
created_at: 2023-10-25 02:50:01+00:00
level: 16k
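Taken together, the fields of the row above (the context snippets, import_statement, token_num, cropped_code, all_code, next_line, gold_snippet_index, created_at and level) read as one retrieval-augmented next-line completion example. The sketch below shows one plausible way such a row could be reassembled into a (prompt, target) pair. It is an illustration only: the field semantics are inferred from the values shown here, the context entries are assumed to be dicts with "path" and "snippet" keys as printed above, and every helper name (build_completion_example, gold_context_entry) is hypothetical rather than part of the dataset.

from typing import Dict, Tuple


def build_completion_example(row: Dict) -> Tuple[str, str]:
    """Assemble a (prompt, target) pair from one row of this dump.

    Assumes row["context"] is a list of dicts with "path" and "snippet" keys,
    as in the rows shown here; all names in this sketch are hypothetical.
    """
    # Cross-file snippets the completion may depend on, one per context entry.
    context_block = "\n\n".join(
        "# " + entry["path"] + "\n" + entry["snippet"] for entry in row["context"]
    )
    # Prompt = retrieved context + the file's imports + the cropped in-file prefix.
    prompt = "\n\n".join(
        [context_block, row["import_statement"], row["cropped_code"]]
    )
    # The model is expected to produce the held-out next line.
    return prompt, row["next_line"]


def gold_context_entry(row: Dict) -> Dict:
    """Return the context entry pointed to by gold_snippet_index, assumed to be
    the snippet that defines the symbol needed to write next_line."""
    return row["context"][row["gold_snippet_index"]]

For the row above, the target would be the line recorded under next_line, and gold_snippet_index would select one entry of the context list (which is shown only partially here, since this excerpt starts mid-row).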
repo_name: EulerSearch/embedding_studio
file_path: embedding_studio/models/plugin.py
[ { "identifier": "ClickstreamParser", "path": "embedding_studio/embeddings/data/clickstream/parsers/parser.py", "snippet": "class ClickstreamParser(object):\n # TODO: annotate types precisely\n def __init__(\n self,\n query_item_type: type,\n search_result_type: type,\n meta_type: type,\n event_type: type,\n ):\n self.query_item_type = query_item_type\n self.search_result_type = search_result_type\n self.meta_type = meta_type\n self.event_type = event_type\n\n def parse(self, session_data: Dict) -> RawClickstreamSession:\n return RawClickstreamSession.from_dict(\n session_data,\n self.query_item_type,\n self.search_result_type,\n self.meta_type,\n self.event_type,\n )\n\n # TODO: merge schemas\n def parse_from_mongo(\n self, session_data: SessionWithEvents\n ) -> RawClickstreamSession:\n return RawClickstreamSession.from_mongo(\n session_data,\n self.query_item_type,\n self.search_result_type,\n self.meta_type,\n self.event_type,\n )" }, { "identifier": "QueryRetriever", "path": "embedding_studio/embeddings/data/clickstream/query_retriever.py", "snippet": "class QueryRetriever(object):\n \"\"\"As we can't exactly predict a schema of storing queries:\n 1. As text exceptly in clickstream service\n 2. As ID of a record with a text\n 3. As a path to an image\n\n We provide an ability to use any query item. So, a user can specify any.\n\n \"\"\"\n\n def setup(self, clickstream_sessions: List[ClickstreamSession]):\n pass\n\n def __call__(self, query: QueryItem):\n return query" }, { "identifier": "ClickstreamSessionsSplitter", "path": "embedding_studio/embeddings/data/clickstream/splitter.py", "snippet": "class ClickstreamSessionsSplitter:\n def __init__(\n self,\n test_size_ratio: float = 0.2,\n shuffle: bool = True,\n random_state: Optional[int] = None,\n ):\n \"\"\"Generate train / test clickstream sessions split.\n\n :param test_size_ratio: ratio of test split size (default: 0.2)\n :param shuffle: to shuffle or not paired clickstream sessions (default: True)\n :param random_state: random state to sklearn splitter (default: None)\n \"\"\"\n if (\n not isinstance(test_size_ratio, float)\n or test_size_ratio <= 0\n or test_size_ratio >= 1.0\n ):\n raise ValueError(\n f\"test_size_ration is a numeric value in range (0.0, 1.0)\"\n )\n\n if test_size_ratio >= 0.5:\n logger.warning(\n \"test_size_ration is larger than 0.5. 
It's unusual for ML to have test size > train size.\"\n )\n\n self._test_size_ratio = test_size_ratio\n\n if not isinstance(shuffle, bool):\n raise ValueError(\"shuffle should be boolean\")\n self._shuffle = shuffle\n self._random_state = random_state\n\n @property\n def shuffle(self) -> bool:\n return self._shuffle\n\n def split(self, sessions: List[ClickstreamSession]) -> DatasetDict:\n \"\"\"Split clickstream sessions.\n\n :param sessions: sessions to be split\n :return: train / test splits accordingly (PairedClickstreamDataset)\n \"\"\"\n # Get all IDs\n all_result_ids: Set[str] = set()\n for session in sessions:\n all_result_ids.update(session.results)\n\n if len(all_result_ids) == 0:\n raise ValueError(\"Sessions list is empty\")\n\n # Ensure a minimum number of unique result IDs in each set\n min_unique_test_sessions: int = int(\n self._test_size_ratio * len(sessions)\n )\n\n # Split the result IDs into train and test sets\n train_result_ids, test_result_ids = train_test_split(\n list(all_result_ids),\n test_size=self._test_size_ratio,\n random_state=self._random_state,\n )\n test_result_ids: Set[str] = set(test_result_ids)\n\n # Split sessions into train and test based on result IDs\n train_sessions: List[ClickstreamSession] = []\n test_sessions: List[ClickstreamSession] = []\n\n for session in sessions:\n if len(session.results) == 0:\n continue\n\n if (\n len(set(session.results) & test_result_ids)\n / len(session.results)\n <= 0.5\n ):\n # If less than 50% of result IDs intersect with the test set, add to the train set\n train_sessions.append(session)\n else:\n test_sessions.append(session)\n\n if len(test_sessions) < min_unique_test_sessions:\n logger.warning(\n f\"Clickstream sessions intersects highly, so they are not split well\"\n )\n random_train_session_indexess: List[int] = random.choices(\n list(range(len(train_sessions))),\n k=min_unique_test_sessions - len(test_sessions),\n )\n for i in reversed(sorted(random_train_session_indexess)):\n test_sessions.append(train_sessions.pop(i))\n\n if len(test_sessions) + len(train_sessions) < len(sessions):\n missed_sessions_count = len(sessions) - (\n len(test_sessions) + len(train_sessions)\n )\n logger.warning(\n f\"Clickstream sessions weren't split correctly, add {missed_sessions_count} more sessions to the train split.\"\n )\n\n for session in sessions:\n if (\n session not in train_sessions\n and session not in test_sessions\n ):\n train_sessions.append(session)\n\n return DatasetDict(\n {\n \"train\": PairedClickstreamDataset(\n train_sessions, self.shuffle\n ),\n \"test\": PairedClickstreamDataset(test_sessions, self.shuffle),\n }\n )" }, { "identifier": "DataLoader", "path": "embedding_studio/embeddings/data/loaders/data_loader.py", "snippet": "class DataLoader(ABC):\n def __init__(self, **kwargs):\n pass\n\n @abstractmethod\n def load(self, items_data: List[ItemMeta]) -> Dataset:\n raise NotImplemented" }, { "identifier": "RankingData", "path": "embedding_studio/embeddings/data/ranking_data.py", "snippet": "class RankingData:\n def __init__(self, clickstream: DatasetDict, items: DatasetDict):\n self.clickstream = clickstream\n self.items = items" }, { "identifier": "ItemStorageProducer", "path": "embedding_studio/embeddings/data/storages/producer.py", "snippet": "class ItemStorageProducer:\n def __init__(\n self,\n preprocessor: ItemsDatasetDictPreprocessor,\n id_field_name: Optional[str] = None,\n ):\n \"\"\"Preprocess and split dataset with train/test clickstream sessions.\n\n :param preprocessor: items dataset dict 
preprocessing\n :param id_field_name: specified field name ID (default: None)\n \"\"\"\n self.preprocessor = preprocessor\n self._id_field_name = (\n id_field_name\n if id_field_name is not None\n else preprocessor.get_id_field_name()\n )\n\n @property\n def id_field_name(self) -> str:\n return self._id_field_name\n\n def _preprocess(self, dataset: DatasetDict) -> DatasetDict:\n logger.debug(\"Prerprocess a dataset\")\n return self.preprocessor.convert(dataset)\n\n def __call__(\n self,\n dataset: Union[Dataset, DatasetDict],\n clickstream_dataset: DatasetDict,\n ) -> DatasetDict:\n \"\"\"Split dataset with train_clickstream / test_clickstream\n\n :param dataset: dataset to be split\n :param clickstream_dataset: train /test clickstream sessions (PairedClickstreamDataset)\n :return: split dataset\n \"\"\"\n\n if not (\n isinstance(clickstream_dataset[\"train\"], PairedClickstreamDataset)\n and isinstance(\n clickstream_dataset[\"test\"], PairedClickstreamDataset\n )\n ):\n raise ValueError(\n \"clickstream_dataset values should be instances of PairedClickstreamDataset\"\n )\n\n if isinstance(dataset, Dataset):\n train_ids: Set[str] = clickstream_dataset[\n \"train\"\n ].irrelevant_ids.union(\n clickstream_dataset[\"train\"].not_irrelevant_ids\n )\n\n if len(train_ids) == 0:\n raise ValueError(\"Train clickstream is empty\")\n\n test_ids: Set[str] = clickstream_dataset[\n \"test\"\n ].irrelevant_ids.union(\n clickstream_dataset[\"test\"].not_irrelevant_ids\n )\n\n if len(test_ids) == 0:\n raise ValueError(\"Train clickstream is empty\")\n\n split_dataset: DatasetDict = DatasetDict(\n {\n \"train\": dataset.filter(\n lambda example: example[self.id_field_name]\n in train_ids\n ),\n \"test\": dataset.filter(\n lambda example: example[self.id_field_name] in test_ids\n ),\n }\n )\n\n else:\n logger.warning(f\"Provided dataset is already split\")\n split_dataset: DatasetDict = dataset\n\n return self._preprocess(split_dataset)" }, { "identifier": "DatasetFieldsNormalizer", "path": "embedding_studio/embeddings/data/utils/fields_normalizer.py", "snippet": "class DatasetFieldsNormalizer:\n ID_FIELD_NAME = \"item_id\"\n ITEM_FIELD_NAME = \"item\"\n\n def __init__(self, item_field_name: str, id_field_name: str):\n \"\"\"Unify column names in DatasetDict, so it can be used in fine-tuning script.\n A dataset should have ID column, related to ID in clickstream.\n\n :param item_field_name: name of column with items.\n :param id_field_name: name of ID column\n \"\"\"\n if not id_field_name:\n raise ValueError(\"id_field_name should be non-empty string\")\n self.id_field_name = id_field_name\n\n if not item_field_name:\n raise ValueError(\"item_field_name should be non-empty string\")\n self.item_field_name = item_field_name\n\n def __call__(self, dataset: DatasetDict) -> DatasetDict:\n id_normalizer = (\n lambda id_value: str(id_value.item())\n if (\n isinstance(id_value, Tensor)\n or isinstance(id_value, FloatTensor)\n )\n else str(id_value)\n )\n for key in dataset.keys():\n if (\n DatasetFieldsNormalizer.ID_FIELD_NAME\n not in dataset.column_names[key]\n ):\n dataset = dataset.rename_column(\n self.id_field_name, DatasetFieldsNormalizer.ID_FIELD_NAME\n )\n else:\n logger.warning(\n f\"Dataset {key} split already has {DatasetFieldsNormalizer.ID_FIELD_NAME} field\"\n )\n\n if (\n DatasetFieldsNormalizer.ITEM_FIELD_NAME\n not in dataset.column_names[key]\n ):\n dataset = dataset.rename_column(\n self.item_field_name,\n DatasetFieldsNormalizer.ITEM_FIELD_NAME,\n )\n else:\n logger.warning(\n f\"Dataset 
{key} split already has {DatasetFieldsNormalizer.ITEM_FIELD_NAME} field\"\n )\n\n return dataset.map(\n lambda example: {\n DatasetFieldsNormalizer.ID_FIELD_NAME: id_normalizer(\n example[DatasetFieldsNormalizer.ID_FIELD_NAME]\n )\n }\n )" }, { "identifier": "ExperimentsManager", "path": "embedding_studio/workers/fine_tuning/experiments/experiments_tracker.py", "snippet": "class ExperimentsManager:\n def __init__(\n self,\n tracking_uri: str,\n main_metric: str,\n accumulators: List[MetricsAccumulator],\n is_loss: bool = False,\n n_top_runs: int = 10,\n requirements: Optional[str] = None,\n retry_config: Optional[RetryConfig] = None,\n ):\n \"\"\"Wrapper over mlflow package to manage certain fine-tuning experiments.\n\n :param tracking_uri: url of MLFlow server\n :param main_metric: name of main metric that will be used to find best model\n :param accumulators: accumulators of metrics to be logged\n :param is_loss: is main metric loss (if True, then best quality is minimal) (default: False)\n :param n_top_runs: how many hyper params group consider to be used in following tuning steps (default: 10)\n :param requirements: extra requirements to be passed to mlflow.pytorch.log_model (default: None)\n :param retry_config: retry policy (default: None)\n \"\"\"\n if not isinstance(tracking_uri, str) or len(tracking_uri) == 0:\n raise ValueError(\n f\"MLFlow tracking URI value should be a not empty string\"\n )\n mlflow.set_tracking_uri(tracking_uri)\n self._tracking_uri = tracking_uri\n if self._tracking_uri.endswith(\"/\"):\n self._tracking_uri = self._tracking_uri[:-1]\n\n self.retry_config = (\n retry_config\n if retry_config\n else ExperimentsManager._get_default_retry_config()\n )\n self.attempt_exception_types = [RestException]\n\n if not isinstance(main_metric, str) or len(main_metric) == 0:\n raise ValueError(f\"main_metric value should be a not empty string\")\n self.main_metric = main_metric\n self._metric_field = f\"metrics.{self.main_metric}\"\n\n self._n_top_runs = n_top_runs\n self._is_loss = is_loss\n\n if len(accumulators) == 0:\n logger.warning(\n \"No accumulators were provided, there will be no metrics logged except loss\"\n )\n self._accumulators = accumulators\n\n self._requirements: List[str] = (\n _get_base_requirements() if requirements is None else requirements\n )\n\n self._iteration_experiment = None\n self._tuning_iteration = None\n self._tuning_iteration_id = None\n\n self._run = None\n self._run_params = None\n self._run_id = None\n\n def _check_artifact_exists(self, run_id, artifact_path):\n client = mlflow.MlflowClient()\n artifacts = client.list_artifacts(run_id, path=artifact_path)\n return any(artifact.path == artifact_path for artifact in artifacts)\n\n @staticmethod\n def _get_default_retry_config() -> RetryConfig:\n default_retry_params = RetryParams(\n max_attempts=settings.DEFAULT_MAX_ATTEMPTS,\n wait_time_seconds=settings.DEFAULT_WAIT_TIME_SECONDS,\n )\n\n config = RetryConfig(default_params=default_retry_params)\n config[\"log_metric\"] = RetryParams(\n max_attempts=settings.MLFLOW_LOG_METRIC_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_LOG_METRIC_WAIT_TIME_SECONDS,\n )\n config[\"log_param\"] = RetryParams(\n max_attempts=settings.MLFLOW_LOG_PARAM_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_LOG_PARAM_WAIT_TIME_SECONDS,\n )\n config[\"log_model\"] = RetryParams(\n max_attempts=settings.MLFLOW_LOG_MODEL_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_LOG_MODEL_WAIT_TIME_SECONDS,\n )\n config[\"load_model\"] = RetryParams(\n 
max_attempts=settings.MLFLOW_LOAD_MODEL_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_LOAD_MODEL_WAIT_TIME_SECONDS,\n )\n config[\"delete_model\"] = RetryParams(\n max_attempts=settings.MLFLOW_DELETE_MODEL_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_DELETE_MODEL_WAIT_TIME_SECONDS,\n )\n config[\"search_runs\"] = RetryParams(\n max_attempts=settings.MLFLOW_SEARCH_RUNS_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_SEARCH_RUNS_WAIT_TIME_SECONDS,\n )\n config[\"end_run\"] = RetryParams(\n max_attempts=settings.MLFLOW_END_RUN_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_END_RUN_WAIT_TIME_SECONDS,\n )\n config[\"get_run\"] = RetryParams(\n max_attempts=settings.MLFLOW_GET_RUN_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_GET_RUN_WAIT_TIME_SECONDS,\n )\n config[\"search_experiments\"] = RetryParams(\n max_attempts=settings.MLFLOW_SEARCH_EXPERIMENTS_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_SEARCH_EXPERIMENTS_WAIT_TIME_SECONDS,\n )\n config[\"delete_experiment\"] = RetryParams(\n max_attempts=settings.MLFLOW_DELETE_EXPERIMENT_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_DELETE_EXPERIMENT_WAIT_TIME_SECONDS,\n )\n config[\"create_experiment\"] = RetryParams(\n max_attempts=settings.MLFLOW_CREATE_EXPERIMENT_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_CREATE_EXPERIMENT_WAIT_TIME_SECONDS,\n )\n config[\"get_experiment\"] = RetryParams(\n max_attempts=settings.MLFLOW_GET_EXPERIMENT_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_GET_EXPERIMENT_WAIT_TIME_SECONDS,\n )\n\n return config\n\n @property\n def is_loss(self) -> bool:\n return self._is_loss\n\n def __del__(self):\n self.finish_run()\n self.finish_iteration()\n\n def is_retryable_error(self, e: Exception) -> bool:\n return False\n\n def _get_model_exists_filter(self) -> str:\n return \"metrics.model_uploaded = 1\"\n\n def _get_artifact_url(self, run_id: str, artifact_path: str) -> str:\n return (\n f\"{self._tracking_uri}/get-artifact?path=\"\n f'{urllib.parse.quote(artifact_path, safe=\"\")}&run_uuid={run_id}'\n )\n\n @retry_method(name=\"log_model\")\n def upload_initial_model(self, model: EmbeddingsModelInterface):\n \"\"\"Upload the very first, initial model to the mlflow server\n\n :param model: model to be uploaded\n \"\"\"\n self.finish_iteration()\n experiment_id = get_experiment_id_by_name(INITIAL_EXPERIMENT_NAME)\n if experiment_id is None:\n logger.info(\n f\"Can't find any active iteration with name: {INITIAL_EXPERIMENT_NAME}\"\n )\n try:\n logger.info(\"Create initial experiment\")\n mlflow.create_experiment(INITIAL_EXPERIMENT_NAME)\n except MlflowException as e:\n if \"Cannot set a deleted experiment\" in str(e):\n logger.error(\n f\"Creation of initial experiment is failed: experiment with the same name {INITIAL_EXPERIMENT_NAME} is deleted, but not archived\"\n )\n experiments = mlflow.search_experiments(\n view_type=mlflow.entities.ViewType.ALL\n )\n deleted_experiment_id = None\n\n for exp in experiments:\n if exp.name == INITIAL_EXPERIMENT_NAME:\n deleted_experiment_id = exp.experiment_id\n break\n\n logger.info(\n f\"Restore deleted experiment with the same name: {INITIAL_EXPERIMENT_NAME}\"\n )\n mlflow.tracking.MlflowClient().restore_experiment(\n deleted_experiment_id\n )\n logger.info(\n f\"Archive deleted experiment with the same name: {INITIAL_EXPERIMENT_NAME}\"\n )\n mlflow.tracking.MlflowClient().rename_experiment(\n deleted_experiment_id,\n INITIAL_EXPERIMENT_NAME + \"_archive\",\n )\n logger.info(\n f\"Delete archived experiment with the same name: {INITIAL_EXPERIMENT_NAME}\"\n )\n 
mlflow.delete_experiment(deleted_experiment_id)\n logger.info(f\"Create initial experiment\")\n mlflow.create_experiment(INITIAL_EXPERIMENT_NAME)\n else:\n raise e\n\n with mlflow.start_run(\n experiment_id=get_experiment_id_by_name(INITIAL_EXPERIMENT_NAME),\n run_name=INITIAL_RUN_NAME,\n ) as run:\n logger.info(\n f\"Upload initial model to {INITIAL_EXPERIMENT_NAME} / {INITIAL_RUN_NAME}\"\n )\n if self._check_artifact_exists(\n get_run_id_by_name(\n get_experiment_id_by_name(INITIAL_EXPERIMENT_NAME),\n INITIAL_RUN_NAME,\n ),\n \"model\",\n ):\n logger.info(\"Model is already uploaded\")\n return\n\n mlflow.pytorch.log_model(\n model, \"model\", pip_requirements=self._requirements\n )\n logger.info(\"Uploading is finished\")\n\n @retry_method(name=\"load_model\")\n def download_initial_model(self) -> EmbeddingsModelInterface:\n \"\"\"Download initial model.\n\n :return: initial embeddings model\n \"\"\"\n model_uri: str = f\"runs:/{get_run_id_by_name(get_experiment_id_by_name(INITIAL_EXPERIMENT_NAME), INITIAL_RUN_NAME)}/model\"\n logger.info(f\"Download the model from {model_uri}\")\n model = mlflow.pytorch.load_model(model_uri)\n logger.info(\"Downloading is finished\")\n return model\n\n @retry_method(name=\"search_runs\")\n def get_top_params(self) -> Optional[List[FineTuningParams]]:\n \"\"\"Get top N previous fine-tuning iteration best params\n\n :return: fine-tuning iteration params\n \"\"\"\n initial_id: Optional[str] = get_experiment_id_by_name(\n INITIAL_EXPERIMENT_NAME\n )\n last_session_id: Optional[str] = self.get_previous_iteration_id()\n if initial_id == last_session_id:\n logger.warning(\n \"Can't retrieve top params, no previous iteration in history\"\n )\n return None\n\n else:\n runs: pd.DataFrame = mlflow.search_runs(\n experiment_ids=[last_session_id],\n filter_string=self._get_model_exists_filter(),\n )\n runs = runs[runs.status == \"FINISHED\"] # and only finished ones\n if runs.shape[0] == 0:\n logger.warning(\n \"Can't retrieve top params, no previous iteration's finished runs with uploaded model in history\"\n )\n return None\n\n # Get the indices that would sort the DataFrame based on the specified parameter\n sorted_indices: np.ndarray = np.argsort(\n runs[self._metric_field].values\n )\n if not self.is_loss:\n sorted_indices = sorted_indices[\n ::-1\n ] # Use [::-1] to sort in descending order\n\n # Extract the top N rows based on the sorted indices\n top_n_rows: np.ndarray = runs.iloc[\n sorted_indices[: self._n_top_runs]\n ]\n\n # Define a mapping dictionary to remove the \"params.\" prefix\n column_mapping: Dict[str, str] = {\n col: col.replace(\"params.\", \"\") for col in top_n_rows.columns\n }\n\n # Rename the columns\n top_n_rows: np.ndarray = top_n_rows.rename(\n columns=column_mapping\n ).to_dict(orient=\"records\")\n\n return [FineTuningParams(**row) for row in top_n_rows]\n\n def _get_best_previous_run_id(self) -> Tuple[Optional[str], bool]:\n initial_id: Optional[str] = get_experiment_id_by_name(\n INITIAL_EXPERIMENT_NAME\n )\n last_session_id: Optional[str] = self.get_previous_iteration_id()\n if initial_id == last_session_id or last_session_id is None:\n return None, True\n else:\n run_id, _ = self._get_best_quality(last_session_id)\n return run_id, False\n\n def _get_best_current_run_id(self) -> Tuple[Optional[str], bool]:\n initial_id: Optional[str] = get_experiment_id_by_name(\n INITIAL_EXPERIMENT_NAME\n )\n if (\n initial_id == self._tuning_iteration_id\n or self._tuning_iteration_id is None\n ):\n return None, True\n else:\n run_id, _ = 
self._get_best_quality(self._tuning_iteration_id)\n return run_id, False\n\n @retry_method(name=\"load_model\")\n def get_last_model_url(self) -> Optional[str]:\n run_id, is_initial = self._get_best_previous_run_id()\n if is_initial:\n logger.warning(\n \"Can't get the best model URL, no previous iteration in history\"\n )\n return None\n else:\n if run_id is None:\n logger.warning(\n \"Can't get the best model URL, no previous iterations \"\n \"finished runs with uploaded model in history\"\n )\n return None\n path = MODEL_ARTIFACT_PATH\n return self._get_artifact_url(run_id, path)\n\n @retry_method(name=\"load_model\")\n def get_current_model_url(self) -> Optional[str]:\n run_id, is_initial = self._get_best_current_run_id()\n if is_initial:\n logger.warning(\n \"Can't get the best model URL, current run is initial\"\n )\n return None\n\n if run_id is None:\n logger.warning(\n \"Can't get the best model URL, no iterations \"\n \"finished runs with uploaded model in history\"\n )\n return None\n path = MODEL_ARTIFACT_PATH\n return self._get_artifact_url(run_id, path)\n\n @retry_method(name=\"load_model\")\n def get_last_model(self) -> EmbeddingsModelInterface:\n \"\"\"Get previous iteration best embedding model.\n\n :return: best embedding model\n \"\"\"\n run_id, is_initial = self._get_best_previous_run_id()\n if is_initial:\n logger.warning(\n \"Download initial model, no previous iteration in history\"\n )\n return self.download_initial_model()\n\n else:\n if run_id is None:\n logger.warning(\n \"Download initial model, no previous iteration's \"\n \"finished runs with uploaded model in history\"\n )\n return self.download_initial_model()\n else:\n model_uri: str = f\"runs:/{run_id}/model\"\n logger.info(f\"Download the model from {model_uri}\")\n model = mlflow.pytorch.load_model(model_uri)\n logger.info(\"Downloading is finished\")\n return model\n\n @retry_method(name=\"load_model\")\n def get_current_model(self) -> Optional[EmbeddingsModelInterface]:\n \"\"\"Get current iteration best embedding model.\n\n :return: best embedding model\n \"\"\"\n if self._tuning_iteration is None:\n logger.error(\"No current iteration, can't get any model\")\n return\n\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n logger.info(\"Download initial model\")\n return self.download_initial_model()\n\n run_id, is_initial = self._get_best_current_run_id()\n model_uri: str = f\"runs:/{run_id}/model\"\n logger.info(f\"Download the model from {model_uri}\")\n model = mlflow.pytorch.load_model(model_uri)\n logger.info(\"Downloading is finished\")\n return model\n\n @retry_method(name=\"search_experiments\")\n def get_previous_iteration_id(self) -> Optional[str]:\n if (\n self._tuning_iteration == INITIAL_EXPERIMENT_NAME\n or self._tuning_iteration is None\n ):\n logger.warning(\n f\"Can't find previous iteration - no current iteration was setup\"\n )\n return None\n\n plugin_name = f\"{self._tuning_iteration.plugin_name}\"\n experiments: List[Experiment] = [\n e\n for e in mlflow.search_experiments()\n if (\n e.name.startswith(EXPERIMENT_PREFIX)\n and e.name.find(plugin_name) != -1\n and e.name != str(self._tuning_iteration)\n )\n ]\n if len(experiments) == 0:\n logger.warning(\"No iteration found\")\n return None\n else:\n return max(\n experiments, key=lambda exp: exp.creation_time\n ).experiment_id\n\n @retry_method(name=\"delete_experiment\")\n def delete_previous_iteration(self):\n experiment_id: Optional[str] = self.get_previous_iteration_id()\n\n logger.info(\"Delete models of previous 
iteration.\")\n runs = mlflow.search_runs(\n experiment_ids=[experiment_id],\n filter_string=self._get_model_exists_filter(),\n )\n runs = runs[runs.status == \"FINISHED\"]\n run_ids = runs[\"run_id\"].tolist()\n\n for run_id in run_ids:\n self.delete_model(run_id, experiment_id)\n\n if experiment_id is not None:\n logger.info(\n f\"Iteration with ID {experiment_id} is going to be deleted\"\n )\n mlflow.tracking.MlflowClient().rename_experiment(\n experiment_id, INITIAL_EXPERIMENT_NAME + \"_archive\"\n )\n mlflow.delete_experiment(experiment_id)\n else:\n logger.warning(\n \"Can't delete a previous iteration, no previous iteration in history\"\n )\n\n @retry_method(name=\"create_experiment\")\n def set_iteration(self, iteration: FineTuningIteration):\n \"\"\"Start a new fine-tuning session.\n\n :param iteration: fine-tuning iteration info\n \"\"\"\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n self.finish_iteration()\n\n logger.info(\"Start a new fine-tuning iterations\")\n\n self._tuning_iteration = iteration\n self._tuning_iteration_id = get_experiment_id_by_name(str(iteration))\n if self._tuning_iteration_id is None:\n self._tuning_iteration_id = mlflow.create_experiment(\n str(iteration)\n )\n\n self._iteration_experiment = mlflow.set_experiment(\n experiment_id=self._tuning_iteration_id\n )\n\n @retry_method(name=\"start_run\")\n def set_run(self, params: FineTuningParams) -> bool:\n \"\"\"Start a new run with provided fine-tuning params\n\n :param params: provided fine-tuning params\n :return: True if it's a finished run (otherwise False)\n \"\"\"\n convert_value = (\n lambda value: \", \".join(map(str, value))\n if isinstance(value, list)\n else value\n )\n\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n # TODO: implement exception\n raise ValueError(\"You can't start run for initial iteration\")\n\n if self._run is not None:\n self.finish_run()\n\n logger.info(\n f\"Start a new run for iteration {self._tuning_iteration_id} with params:\\n\\t{str(params)}\"\n )\n\n self._run_params = params\n run_name: str = self._run_params.id\n self._run_id = get_run_id_by_name(self._tuning_iteration_id, run_name)\n\n self._run = mlflow.start_run(\n self._run_id, self._tuning_iteration_id, run_name\n )\n if self._run_id is None:\n self._run_id = self._run.info.run_id\n for key, value in dict(self._tuning_iteration).items():\n mlflow.log_param(key, convert_value(value))\n\n for key, value in dict(self._run_params).items():\n mlflow.log_param(key, convert_value(value))\n\n mlflow.log_metric(\"model_uploaded\", 0)\n\n return False\n else:\n return self._run.info.status == \"FINISHED\"\n\n @retry_method(name=\"search_runs\")\n def model_is_uploaded(self) -> bool:\n runs: pd.DataFrame = mlflow.search_runs(\n experiment_ids=[self._tuning_iteration_id],\n filter_string=self._get_model_exists_filter(),\n )\n runs = runs[runs[\"run_id\"] == self._run_id]\n return runs.shape[0] > 0\n\n @retry_method(name=\"get_experiment\")\n def finish_iteration(self):\n logger.info(f\"Finish current iteration {self._tuning_iteration_id}\")\n self._tuning_iteration = INITIAL_EXPERIMENT_NAME\n self._tuning_iteration_id = get_experiment_id_by_name(\n INITIAL_EXPERIMENT_NAME\n )\n\n if self._tuning_iteration_id is None:\n self._iteration_experiment = mlflow.set_experiment(\n experiment_name=INITIAL_EXPERIMENT_NAME\n )\n self._tuning_iteration_id = (\n self._iteration_experiment.experiment_id\n )\n else:\n self._iteration_experiment = mlflow.set_experiment(\n experiment_id=self._tuning_iteration_id\n )\n\n 
logger.info(f\"Current iteration is finished\")\n\n @retry_method(name=\"end_run\")\n def finish_run(self):\n logger.info(\n f\"Finish current run {self._tuning_iteration_id} / {self._run_id}\"\n )\n for accumulator in self._accumulators:\n accumulator.clear()\n\n mlflow.end_run()\n\n # Set params to default None\n self._run = None\n self._run_params = None\n self._run_id = None\n\n logger.info(f\"Current run is finished\")\n\n @retry_method(name=\"log_param\")\n def _set_model_as_deleted(self, run_id: str, experiment_id: str):\n with mlflow.start_run(\n run_id=run_id, experiment_id=experiment_id\n ) as run:\n mlflow.log_metric(\"model_deleted\", 1)\n mlflow.log_metric(\"model_uploaded\", 0)\n\n @retry_method(name=\"delete_model\")\n def _delete_model(self, run_id: str, experiment_id: str) -> bool:\n logger.warning(\n f\"Unable to delete a model for run {run_id}, MLFlow has no such functionality, please implement on your own.\"\n )\n return False\n\n @retry_method(name=\"get_run\")\n def delete_model(self, run_id: str, experiment_id: Optional[str] = None):\n experiment_id = (\n self._tuning_iteration_id\n if experiment_id is None\n else experiment_id\n )\n if experiment_id is None:\n raise ValueError(\n f\"No iteration was initialized, unable to delete model.\"\n )\n\n if experiment_id == INITIAL_EXPERIMENT_NAME:\n raise ValueError(f\"Initial model can't be deleted.\")\n\n run_info = None\n try:\n run_info = mlflow.get_run(run_id=run_id)\n except RestException as e:\n if e.get_http_status_code() == 404:\n logger.exception(f\"Run with ID {run_id} doesn't exist.\")\n else:\n raise e\n\n if run_info is not None:\n runs: pd.DataFrame = mlflow.search_runs(\n filter_string=self._get_model_exists_filter()\n )\n runs = runs[runs[\"run_id\"] == run_id]\n if runs.shape[0] == 0:\n logger.warning(\n f\"Run {run_id} has no model being uploaded. 
Nothing to delete\"\n )\n\n else:\n deleted = None\n try:\n deleted = self._delete_model(run_id, experiment_id)\n except MaxAttemptsReachedException:\n pass\n\n if deleted:\n self._set_model_as_deleted(run_id, experiment_id)\n\n @retry_method(name=\"log_model\")\n def save_model(\n self, model: EmbeddingsModelInterface, best_only: bool = True\n ):\n \"\"\"Save fine-tuned embedding model\n\n :param model: model to be saved\n :param best_only: save only if it's the best (default: True)\n \"\"\"\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n raise ValueError(\n f\"Can't save not initial model for {INITIAL_EXPERIMENT_NAME} experiment\"\n )\n\n if self._run_id is None:\n raise ValueError(\"There is no current Run\")\n\n logger.info(\n f\"Save model for {self._tuning_iteration_id} / {self._run_id}\"\n )\n if not best_only:\n mlflow.pytorch.log_model(\n model, \"model\", pip_requirements=self._requirements\n )\n mlflow.log_metric(\"model_uploaded\", 1)\n logger.info(\"Upload is finished\")\n else:\n current_quality = self.get_quality()\n best_run_id, best_quality = self.get_best_quality()\n\n if best_run_id is None or (\n current_quality <= best_quality\n if self.is_loss\n else current_quality >= best_quality\n ):\n mlflow.pytorch.log_model(\n model, \"model\", pip_requirements=self._requirements\n )\n mlflow.log_metric(\"model_uploaded\", 1)\n logger.info(\"Upload is finished\")\n\n if best_run_id is not None:\n self.delete_model(best_run_id)\n else:\n logger.info(\"Not the best run - ignore saving\")\n\n @retry_method(name=\"log_metric\")\n def save_metric(self, metric_value: MetricValue):\n \"\"\"Accumulate and save metric value\n\n :param metric_value: value to be logged\n \"\"\"\n for accumulator in self._accumulators:\n for name, value in accumulator.accumulate(metric_value):\n mlflow.log_metric(name, value)\n\n @retry_method(name=\"search_runs\")\n def get_quality(self) -> float:\n \"\"\"Current run quality value\n\n :return: quality value\n \"\"\"\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n raise ValueError(\n f\"No metrics for {INITIAL_EXPERIMENT_NAME} experiment\"\n )\n\n if self._run_id is None:\n raise ValueError(\"There is no current Run\")\n\n runs: pd.DataFrame = mlflow.search_runs(\n experiment_ids=[self._tuning_iteration_id]\n )\n quality: np.ndarray = runs[runs.run_id == self._run_id][\n self._metric_field\n ]\n return float(quality) if quality.shape[0] == 1 else float(quality[0])\n\n @retry_method(name=\"search_runs\")\n def _get_best_quality(\n self, experiment_id: str\n ) -> Tuple[Optional[str], float]:\n runs: pd.DataFrame = mlflow.search_runs(\n experiment_ids=[experiment_id],\n filter_string=self._get_model_exists_filter(),\n )\n runs = runs[runs.status == \"FINISHED\"] # and not finished ones\n if runs.shape[0] == 0:\n logger.warning(\n \"No finished experiments found with model uploaded, except initial\"\n )\n return None, 0.0\n\n else:\n value: float = (\n runs[self._metric_field].min()\n if self.is_loss\n else runs[self._metric_field].max()\n )\n best: pd.DataFrame = runs[runs[self._metric_field] == value][\n [\"run_id\", self._metric_field]\n ]\n return list(best.itertuples(index=False, name=None))[0]\n\n def get_best_quality(self) -> Tuple[str, float]:\n \"\"\"Get current fine-tuning iteration best quality\n\n :return: run_id and best metric value\n \"\"\"\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n raise ValueError(\n f\"No metrics for {INITIAL_EXPERIMENT_NAME} experiment\"\n )\n\n return 
self._get_best_quality(self._tuning_iteration_id)" }, { "identifier": "FineTuningSettings", "path": "embedding_studio/workers/fine_tuning/experiments/finetuning_settings.py", "snippet": "class FineTuningSettings(BaseModel):\n \"\"\"\n\n :param loss_func: loss object for a ranking task\n :param metric_calculators: list of trackable metrics calculators (default: None)\n by default only DistanceShift metric\n :param ranker: ranking function (query, items) -> ranks (defult: cosine similarity)\n :param is_similarity: is ranking function similarity like or distance (default: True)\n :param confidence_calculator: function to calculate results confidences (default: dummy_confidences)\n :param step_size: optimizer steps (default: 500)\n :param gamma: optimizers gamma (default: 0.9)\n :param num_epochs: num of training epochs (default: 10)\n :param batch_size: count of sessions in a batch (default: 1)\n :param test_each_n_sessions: frequency of validation, if value in range [0, 1] - used as ratio (default: -1)\n \"\"\"\n\n loss_func: RankingLossInterface\n metric_calculators: Optional[List[MetricCalculator]] = None\n ranker: Optional[\n Callable[[FloatTensor, FloatTensor], FloatTensor]\n ] = COSINE_SIMILARITY\n is_similarity: Optional[bool] = True\n confidence_calculator: Optional[Callable] = dummy_confidences\n step_size: Optional[int] = 500\n gamma: Optional[float] = 0.9\n num_epochs: Optional[int] = 10\n batch_size: Optional[int] = 1\n test_each_n_sessions: Optional[Union[float, int]] = -1\n\n class Config:\n arbitrary_types_allowed = True" }, { "identifier": "MetricsAccumulator", "path": "embedding_studio/workers/fine_tuning/experiments/metrics_accumulator.py", "snippet": "class MetricsAccumulator:\n def __init__(\n self,\n name: str,\n calc_mean: bool = False,\n calc_sliding: bool = False,\n calc_min: bool = False,\n calc_max: bool = False,\n window_size: int = 10,\n ):\n \"\"\"Accumulator of metric values + calculator of aggregations like mean, max, min, sliding_mean.\n\n :param name: metric name (metrics with other name will be ignored)\n :param calc_mean: should accumulator calculate mean value (default: False)\n :param calc_sliding: should accumulator calculate sliding mean value (default: False)\n :param calc_min: should accumulator calculate min value (default: False)\n :param calc_max: should accumulator calculate max value (default: False)\n :param window_size: size of sliding window (default: 10)\n \"\"\"\n if not isinstance(name, str) or len(name) == 0:\n raise ValueError(\"MetricsAccumulator's name should not be empty\")\n\n self._name = name\n\n if not isinstance(calc_mean, bool):\n raise ValueError(\"calc_mean value should be bool\")\n self._calc_mean = calc_mean\n\n if not isinstance(calc_sliding, bool):\n raise ValueError(\"calc_sliding value should be bool\")\n self._calc_sliding = calc_sliding\n\n if not isinstance(calc_min, bool):\n raise ValueError(\"calc_min value should be bool\")\n self._calc_min = calc_min\n\n if not isinstance(calc_max, bool):\n raise ValueError(\"calc_max value should be bool\")\n self._calc_max = calc_max\n\n if not isinstance(window_size, int) or window_size <= 1:\n raise ValueError(\n \"window_size value should be integer with value more than 1\"\n )\n\n self._window_size = window_size\n self._values = []\n\n @property\n def name(self) -> str:\n return self._name\n\n def clear(self):\n \"\"\"Clear accumulator\"\"\"\n self._values = []\n\n def accumulate(self, value: MetricValue) -> List[Tuple[str, float]]:\n \"\"\"Add metric value to an 
accumulator.\n\n :param value: metric to be accumulated\n :return: aggregations\n \"\"\"\n if self.name == value.name:\n self._values.append(value.value)\n\n return self.aggregate()\n\n return []\n\n def aggregate(self) -> List[Tuple[str, float]]:\n \"\"\"Aggregate accumulated metrics\n\n :return: metric aggregations (last, mean, sliding, min, max)\n \"\"\"\n aggregations: List[Tuple[str, float]] = []\n if len(self._values) > 0:\n aggregations.append((self.name, self._values[-1]))\n if self._calc_mean:\n aggregations.append(\n (f\"mean_{self.name}\", float(np.mean(self._values)))\n )\n\n if self._calc_sliding:\n slide_value = float(\n np.mean(self._values)\n if len(self._values) < self._window_size\n else np.mean(self._values[-self._window_size :])\n )\n aggregations.append((f\"sliding_{self.name}\", slide_value))\n\n if self._calc_min:\n aggregations.append((f\"min_{self.name}\", np.min(self._values)))\n\n if self._calc_max:\n aggregations.append((f\"max_{self.name}\", np.max(self._values)))\n\n return aggregations" } ]
from dataclasses import dataclass from typing import Any, Dict, List, Optional from pydantic import BaseModel from embedding_studio.embeddings.data.clickstream.parsers.parser import ( ClickstreamParser, ) from embedding_studio.embeddings.data.clickstream.query_retriever import ( QueryRetriever, ) from embedding_studio.embeddings.data.clickstream.splitter import ( ClickstreamSessionsSplitter, ) from embedding_studio.embeddings.data.loaders.data_loader import DataLoader from embedding_studio.embeddings.data.ranking_data import RankingData from embedding_studio.embeddings.data.storages.producer import ( ItemStorageProducer, ) from embedding_studio.embeddings.data.utils.fields_normalizer import ( DatasetFieldsNormalizer, ) from embedding_studio.workers.fine_tuning.experiments.experiments_tracker import ( ExperimentsManager, ) from embedding_studio.workers.fine_tuning.experiments.finetuning_settings import ( FineTuningSettings, ) from embedding_studio.workers.fine_tuning.experiments.metrics_accumulator import ( MetricsAccumulator, )
10,896
class PluginMeta(BaseModel):
    name: str
    version: str = "1.0.0"
    description: Optional[str] = None


@dataclass
class FineTuningBuilder:
    data_loader: DataLoader
    query_retriever: QueryRetriever
    clickstream_parser: ClickstreamParser
    clickstream_sessions_splitter: ClickstreamSessionsSplitter
    dataset_fields_normalizer: DatasetFieldsNormalizer
    item_storage_producer: ItemStorageProducer
class PluginMeta(BaseModel):
    name: str
    version: str = "1.0.0"
    description: Optional[str] = None


@dataclass
class FineTuningBuilder:
    data_loader: DataLoader
    query_retriever: QueryRetriever
    clickstream_parser: ClickstreamParser
    clickstream_sessions_splitter: ClickstreamSessionsSplitter
    dataset_fields_normalizer: DatasetFieldsNormalizer
    item_storage_producer: ItemStorageProducer
accumulators: List[MetricsAccumulator]
9
2023-10-31 00:33:13+00:00
16k
nv-tlabs/vid2player3d
uhc/smpllib/smpl_local_robot.py
[ { "identifier": "Skeleton", "path": "uhc/khrylib/mocap/skeleton_local.py", "snippet": "class Skeleton:\n def __init__(\n self, template_dir=\"/hdd/zen/dev/copycat/Copycat/assets/bigfoot_template_v1.pkl\"\n ):\n self.bones = []\n self.name2bone = {}\n self.mass_scale = 1.0\n self.len_scale = 1.0\n self.dof_name = [\"x\", \"y\", \"z\"]\n self.root = None\n self.template_geoms = None\n if osp.isfile(template_dir):\n self.template_geoms = joblib.load(template_dir)\n\n def forward_bvh(self, bone):\n if bone.parent:\n # bone.pos = bone.parent.pos + bone.offset\n bone.pos = bone.offset\n else:\n bone.pos = bone.offset\n for bone_c in bone.child:\n self.forward_bvh(bone_c)\n\n def load_from_offsets(\n self,\n offsets,\n parents,\n scale,\n jrange,\n exclude_bones=None,\n channels=None,\n spec_channels=None,\n ):\n if channels is None:\n channels = [\"x\", \"y\", \"z\"]\n if exclude_bones is None:\n exclude_bones = {}\n if spec_channels is None:\n spec_channels = dict()\n\n joint_names = list(\n filter(lambda x: all([t not in x for t in exclude_bones]), offsets.keys())\n )\n dof_ind = {\"x\": 0, \"y\": 1, \"z\": 2}\n self.len_scale = scale\n self.root = Bone()\n self.root.id = 0\n self.root.name = joint_names[0]\n self.root.channels = channels\n self.name2bone[self.root.name] = self.root\n self.bones.append(self.root)\n for i, joint in enumerate(joint_names[1:]):\n bone = Bone()\n bone.id = i + 1\n bone.name = joint\n \n bone.channels = (\n spec_channels[joint] if joint in spec_channels.keys() else channels\n )\n bone.dof_index = [dof_ind[x] for x in bone.channels]\n bone.offset = np.array(offsets[joint]) * self.len_scale\n bone.lb = np.rad2deg(jrange[joint][:, 0])\n bone.ub = np.rad2deg(jrange[joint][:, 1])\n\n\n self.bones.append(bone)\n self.name2bone[joint] = bone\n for bone in self.bones[1:]:\n parent_name = parents[bone.name]\n # print(parent_name)\n if parent_name in self.name2bone.keys():\n bone_p = self.name2bone[parent_name]\n bone_p.child.append(bone)\n bone.parent = bone_p\n\n self.forward_bvh(self.root)\n # import pdb\n # pdb.set_trace()\n for bone in self.bones:\n if len(bone.child) == 0:\n bone.end = bone.pos.copy() + 0.002\n for c_bone, p_bone in parents.items():\n if p_bone == bone.name:\n bone.end += np.array(offsets[c_bone]) * self.len_scale\n break\n else:\n bone.end = sum([bone_c.pos for bone_c in bone.child]) / len(bone.child)\n\n def write_xml(\n self,\n fname,\n template_fname=\"/hdd/zen/dev/copycat/Copycat/assets/mujoco_models/template/humanoid_template_local.xml\",\n offset=np.array([0, 0, 0]),\n ref_angles=None,\n bump_buffer=False,\n ):\n if ref_angles is None:\n ref_angles = {}\n parser = XMLParser(remove_blank_text=True)\n tree = parse(template_fname, parser=parser)\n worldbody = tree.getroot().find(\"worldbody\")\n self.write_xml_bodynode(self.root, worldbody, offset, ref_angles)\n\n # create actuators\n actuators = tree.getroot().find(\"actuator\")\n joints = worldbody.findall(\".//joint\")\n for joint in joints[1:]:\n name = joint.attrib[\"name\"]\n attr = dict()\n attr[\"name\"] = name\n attr[\"joint\"] = name\n attr[\"gear\"] = \"1\"\n SubElement(actuators, \"motor\", attr)\n if bump_buffer:\n SubElement(tree.getroot(), \"size\", {\"njmax\": \"700\", \"nconmax\": \"200\"})\n tree.write(fname, pretty_print=True)\n\n def write_str(\n self,\n template_fname=\"/hdd/zen/dev/copycat/Copycat/assets/mujoco_models/template/humanoid_template_local.xml\",\n offset=np.array([0, 0, 0]),\n ref_angles=None,\n bump_buffer=False,\n ):\n if ref_angles is None:\n ref_angles = {}\n 
parser = XMLParser(remove_blank_text=True)\n tree = parse(template_fname, parser=parser)\n worldbody = tree.getroot().find(\"worldbody\")\n self.write_xml_bodynode(self.root, worldbody, offset, ref_angles)\n\n # create actuators\n actuators = tree.getroot().find(\"actuator\")\n joints = worldbody.findall(\".//joint\")\n for joint in joints[1:]:\n name = joint.attrib[\"name\"]\n attr = dict()\n attr[\"name\"] = name\n attr[\"joint\"] = name\n attr[\"gear\"] = \"500\"\n SubElement(actuators, \"motor\", attr)\n if bump_buffer:\n SubElement(tree.getroot(), \"size\", {\"njmax\": \"700\", \"nconmax\": \"200\"})\n\n return etree.tostring(tree, pretty_print=False)\n\n def write_xml_bodynode(self, bone, parent_node, offset, ref_angles):\n attr = dict()\n attr[\"name\"] = bone.name\n attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(bone.pos + offset))\n attr[\"user\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(bone.end + offset))\n node = SubElement(parent_node, \"body\", attr)\n\n # write joints\n if bone.parent is None:\n j_attr = dict()\n j_attr[\"name\"] = bone.name\n # j_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(bone.pos + offset))\n j_attr[\"limited\"] = \"false\"\n j_attr[\"type\"] = \"free\"\n j_attr[\"armature\"] = \"0\"\n j_attr[\"damping\"] = \"0\"\n # j_attr[\"stiffness\"] = \"500\"\n SubElement(node, \"joint\", j_attr)\n else:\n for i in range(len(bone.dof_index)):\n ind = bone.dof_index[i]\n axis = bone.orient[:, ind]\n j_attr = dict()\n j_attr[\"name\"] = bone.name + \"_\" + self.dof_name[ind]\n j_attr[\"type\"] = \"hinge\"\n j_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(bone.pos + offset))\n j_attr[\"axis\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*axis)\n j_attr[\"stiffness\"] = \"500\"\n j_attr[\"damping\"] = \"50\"\n j_attr[\"armature\"] = \"0.02\"\n\n if i < len(bone.lb):\n j_attr[\"range\"] = \"{0:.4f} {1:.4f}\".format(bone.lb[i], bone.ub[i])\n else:\n j_attr[\"range\"] = \"-180.0 180.0\"\n if j_attr[\"name\"] in ref_angles.keys():\n j_attr[\"ref\"] = f\"{ref_angles[j_attr['name']]:.1f}\"\n\n SubElement(node, \"joint\", j_attr)\n\n # write geometry\n if self.template_geoms is None or len(self.template_geoms[bone.name]) == 0:\n if bone.parent is None:\n g_attr = dict()\n g_attr[\"size\"] = \"0.0300\"\n g_attr[\"type\"] = \"sphere\"\n g_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(bone.pos + offset))\n else:\n e1 = np.zeros(3)\n e2 = bone.end.copy() + offset\n g_attr = dict()\n g_attr[\"size\"] = \"0.0100\"\n if bone.name.endswith(\"3\"):\n g_attr[\"type\"] = \"sphere\"\n g_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(\n *(bone.pos + offset)\n )\n else:\n g_attr[\"type\"] = \"capsule\"\n g_attr[\n \"fromto\"\n ] = \"{0:.4f} {1:.4f} {2:.4f} {3:.4f} {4:.4f} {5:.4f}\".format(\n *np.concatenate([e1, e2])\n )\n\n g_attr[\"contype\"] = \"1\"\n g_attr[\"conaffinity\"] = \"1\"\n\n else:\n g_attr = dict()\n template_attributes = self.template_geoms[bone.name][0]\n g_attr[\"type\"] = template_attributes[\"type\"]\n # g_attr[\"contype\"] = template_attributes[\"contype\"]\n # g_attr[\"conaffinity\"] = template_attributes[\"conaffinity\"]\n g_attr[\"contype\"] = \"1\"\n g_attr[\"conaffinity\"] = \"1\"\n g_attr[\"density\"] = \"500\"\n e1 = np.zeros(3)\n e2 = bone.end.copy() + offset\n # template_attributes[\"start\"]\n if g_attr[\"type\"] == \"capsule\":\n g_attr[\n \"fromto\"\n ] = \"{0:.4f} {1:.4f} {2:.4f} {3:.4f} {4:.4f} {5:.4f}\".format(\n *np.concatenate(\n [e1, e2]\n )\n )\n g_attr[\"size\"] = \"{0:.4f}\".format(*template_attributes[\"size\"])\n elif 
g_attr[\"type\"] == \"box\":\n # g_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(\n # *template_attributes[\"start\"]\n # )\n multiplier = np.linalg.norm(e2 - e1) / 0.0945\n pos = (e1 + e2) / 2\n if bone.name == \"L_Toe\" or bone.name == \"R_Toe\":\n pos[1] += 0.05\n \n\n g_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*pos)\n\n g_attr[\"size\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(\n *template_attributes[\"size\"] * multiplier\n )\n g_attr[\"quat\"] = \"{0:.4f} {1:.4f} {2:.4f} {3:.4f}\".format(\n *template_attributes[\"rot\"]\n )\n elif g_attr[\"type\"] == \"sphere\":\n g_attr[\"size\"] = \"{0:.4f}\".format(*template_attributes[\"size\"])\n g_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(\n *np.zeros(3)\n )\n SubElement(node, \"geom\", g_attr)\n\n\n # write child bones\n for bone_c in bone.child:\n self.write_xml_bodynode(bone_c, node, offset, ref_angles)" }, { "identifier": "Skeleton", "path": "uhc/khrylib/mocap/skeleton_mesh_local.py", "snippet": "class Skeleton:\n def __init__(self, geom_dir, rel_geom_dir):\n self.geom_dir = geom_dir\n self.rel_geom_dir = rel_geom_dir\n self.bones = []\n self.name2bone = {}\n self.mass_scale = 1.0\n self.len_scale = 1.0\n self.root = None\n self.equalities = None\n self.exclude_contacts = None\n self.collision_groups = None\n self.simple_geom = False\n self.buffer_dict = {\"njmax\": \"2500\", \"nconmax\": \"500\"}\n\n def forward_bones(self, bone):\n if bone.parent:\n # bone.pos = bone.parent.pos + bone.offset\n bone.pos = bone.offset\n for bone_c in bone.child:\n self.forward_bones(bone_c)\n\n def load_from_offsets(\n self,\n offsets,\n parents,\n axes,\n channels,\n jrange,\n sites,\n scale,\n equalities,\n exclude_contacts=None,\n collision_groups=None,\n conaffinity=None,\n simple_geom=False,\n color_dict=None,\n ):\n if exclude_contacts is None:\n exclude_contacts = []\n if collision_groups is None:\n collision_groups = {}\n self.exclude_contacts = exclude_contacts\n self.collision_groups = {}\n self.conaffinity = {}\n self.color_dict = color_dict\n\n for group, bones in collision_groups.items():\n for bone in bones:\n self.collision_groups[bone] = group\n\n for group, bones in conaffinity.items():\n for bone in bones:\n self.conaffinity[bone] = group\n\n self.simple_geom = simple_geom\n\n joint_names = list(offsets.keys())\n dof_ind = {\"x\": 0, \"y\": 1, \"z\": 2}\n self.equalities = equalities\n self.len_scale = scale\n self.root = Bone()\n self.root.id = 0\n self.root.name = joint_names[0]\n self.root.orient = axes[joint_names[0]]\n self.root.pos = offsets[joint_names[0]]\n self.root.sites = sites.get(joint_names[0], [])\n self.name2bone[self.root.name] = self.root\n self.bones.append(self.root)\n\n for i, joint in enumerate(joint_names[1:]):\n bone = Bone()\n bone.id = i + 1\n bone.name = joint\n bone.channels = channels[joint]\n bone.dof_index = [dof_ind[x[0]] for x in bone.channels]\n bone.offset = offsets[joint] * self.len_scale\n bone.orient = axes[joint]\n bone.lb = np.rad2deg(jrange[joint][:, 0])\n bone.ub = np.rad2deg(jrange[joint][:, 1])\n bone.sites = sites.get(joint, [])\n self.bones.append(bone)\n self.name2bone[joint] = bone\n\n for bone in self.bones[1:]:\n parent_name = parents[bone.name]\n if parent_name in self.name2bone.keys():\n bone_p = self.name2bone[parent_name]\n bone_p.child.append(bone)\n bone.parent = bone_p\n\n self.forward_bones(self.root)\n for bone in self.bones:\n if len(bone.child) == 0:\n bone.ends.append(bone.pos.copy())\n else:\n for bone_c in bone.child:\n 
bone.ends.append(bone_c.pos.copy())\n\n def write_str(\n self,\n template_fname=TEMPLATE_FILE,\n offset=np.array([0, 0, 0]),\n ref_angles=None,\n bump_buffer=False,\n ):\n tree = self.construct_tree(ref_angles=ref_angles,\n offset=offset,\n template_fname=template_fname)\n if bump_buffer:\n SubElement(tree.getroot(), \"size\", self.buffer_dict)\n return etree.tostring(tree, pretty_print=True)\n\n def write_xml(\n self,\n fname,\n template_fname=TEMPLATE_FILE,\n offset=np.array([0, 0, 0]),\n ref_angles=None,\n bump_buffer=False,\n ):\n tree = self.construct_tree(ref_angles=ref_angles,\n offset=offset,\n template_fname=template_fname)\n if bump_buffer:\n SubElement(tree.getroot(), \"size\", self.buffer_dict)\n # create sensors\n # sensor = tree.getroot().find(\"sensor\")\n # for bone in self.bones:\n # SubElement(sensor, 'framelinvel', {'objtype': 'body', 'objname': bone.name})\n # for bone in self.bones:\n # SubElement(sensor, 'frameangvel', {'objtype': 'body', 'objname': bone.name})\n # for bone in self.bones:\n # SubElement(sensor, 'framelinvel', {'objtype': 'xbody', 'objname': bone.name})\n\n tree.write(fname, pretty_print=True)\n\n def construct_tree(\n self,\n template_fname=TEMPLATE_FILE,\n offset=np.array([0, 0, 0]),\n ref_angles=None,\n ):\n if ref_angles is None:\n ref_angles = {}\n parser = XMLParser(remove_blank_text=True)\n tree = parse(template_fname, parser=parser)\n worldbody = tree.getroot().find(\"worldbody\")\n\n self.write_xml_bodynode(self.root, worldbody, offset, ref_angles)\n\n # create meshes\n asset = tree.getroot().find(\"asset\")\n for bone in self.bones:\n if os.path.exists(f\"{self.geom_dir}/geom/{bone.name}.stl\"):\n attr = {\n \"file\": f\"{self.rel_geom_dir}/geom/{bone.name}.stl\",\n \"name\": f\"{bone.name}_mesh\"\n }\n SubElement(asset, \"mesh\", attr)\n\n # create actuators\n actuators = tree.getroot().find(\"actuator\")\n\n joints = worldbody.findall(\".//joint\")\n for joint in joints:\n name = joint.attrib[\"name\"]\n attr = dict()\n attr[\"name\"] = name\n attr[\"joint\"] = name\n attr[\"gear\"] = \"1\"\n SubElement(actuators, \"motor\", attr)\n\n # create exclude contacts\n c_node = tree.getroot().find(\"contact\")\n for bname1, bname2 in self.exclude_contacts:\n attr = {\"body1\": bname1, \"body2\": bname2}\n SubElement(c_node, \"exclude\", attr)\n # create equalities\n eq_node = tree.getroot().find(\"equality\")\n for eq_joints in self.equalities.values():\n for j1 in range(len(eq_joints) - 1):\n for j2 in range(j1 + 1, len(eq_joints)):\n jname1, jcoeff1 = eq_joints[j1]\n jname2, jcoeff2 = eq_joints[j2]\n coeff = jcoeff1 / jcoeff2\n attr = {\n \"joint1\": jname1,\n \"joint2\": jname2,\n \"polycoef\": f\"0 {coeff:.6f} 0 0 0\",\n }\n SubElement(eq_node, \"joint\", attr)\n return tree\n\n def write_xml_bodynode(self, bone, parent_node, offset, ref_angles):\n attr = dict()\n attr[\"name\"] = bone.name\n attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(bone.pos + offset))\n quat = quaternion_from_matrix(bone.orient)\n attr[\"quat\"] = \"{0:.4f} {1:.4f} {2:.4f} {3:.4f}\".format(*quat)\n node = SubElement(parent_node, \"body\", attr)\n\n # write joints\n if bone.parent is None:\n j_attr = dict()\n j_attr[\"name\"] = bone.name\n # j_attr[\"limited\"] = \"false\"\n # j_attr[\"type\"] = \"free\"\n # j_attr[\"armature\"] = \"0.02\"\n # j_attr[\"damping\"] = \"50\"\n # j_attr[\"stiffness\"] = \"500\"\n # j_attr[\"frictionloss\"] = \"0\"\n \n SubElement(node, \"freejoint\", j_attr)\n else:\n\n for i in range(len(bone.channels)):\n ind = bone.dof_index[i]\n 
axis = bone.orient[:, ind]\n j_attr = dict()\n \n \n j_attr[\"name\"] = bone.name + \"_\" + bone.channels[i]\n j_attr[\"type\"] = \"hinge\"\n j_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(bone.pos +\n offset))\n j_attr[\"axis\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*axis)\n\n\n j_attr[\"stiffness\"] = str(GAINS[bone.name][0])\n j_attr[\"damping\"] = str(GAINS[bone.name][1])\n j_attr[\"armature\"] = \"0.02\"\n \n if i < len(bone.lb):\n j_attr[\"range\"] = \"{0:.4f} {1:.4f}\".format(\n bone.lb[i], bone.ub[i])\n else:\n j_attr[\"range\"] = \"-180.0 180.0\"\n if j_attr[\"name\"] in ref_angles.keys():\n j_attr[\"ref\"] = f\"{ref_angles[j_attr['name']]:.1f}\"\n SubElement(node, \"joint\", j_attr)\n\n # write sites\n for s_name, s_pos, s_quat in bone.sites:\n s_attr = {\"name\": s_name}\n s_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(s_pos + offset))\n s_attr[\"quat\"] = \"{0:.4f} {1:.4f} {2:.4f} {3:.4f}\".format(*s_quat)\n s_attr[\"type\"] = \"sphere\"\n s_attr[\"size\"] = \"0.03\"\n SubElement(node, \"site\", s_attr)\n\n # write geometry\n geom_path = f\"{self.geom_dir}/geom/{bone.name}.stl\"\n \n if not self.simple_geom:\n assert os.path.exists(geom_path)\n if os.path.exists(geom_path):\n g_attr = {\"type\": \"mesh\", \"mesh\": f\"{bone.name}_mesh\"}\n if bone.name in self.collision_groups.keys():\n g_attr[\"density\"] = \"900\"\n # g_attr[\"density\"] = \"400\"\n # g_attr[\"density\"] = \"1000\"\n \n g_attr[\"contype\"] = str(self.collision_groups[bone.name])\n g_attr[\"conaffinity\"] = str(self.conaffinity[bone.name])\n\n # g_attr[\"solimp\"] = \"0.9 0.95 0.001 0.5 2\"\n # g_attr[\"solref\"] = \"0.02 1\"\n # g_attr[\"size\"] = str(10)\n # g_attr[\"friction\"] = \"0.000000000005 0.000000000005 0.1\"\n if not self.color_dict is None:\n g_attr[\"rgba\"] = self.color_dict[bone.name]\n\n # if bone.name in [\"L_Ankle\", \"R_Ankle\", \"L_Toe\", \"R_Toe\"]:\n # g_attr[\"friction\"] = \"5 500 500\"\n # g_attr[\"solimp\"] = \"0.9 0.95 0.001 0.5 2\"\n # g_attr[\"solref\"] = \"0.02 1\"\n # g_attr[\"margin\"] = \"0.0000000000000000001\"\n\n # g_attr[\"solimp\"] = \"0.9 0.99 0.0001 0.5 2\"\n # g_attr[\"solref\"] = \"0.001 0.5\"\n # g_attr[\"condim\"] = \"6\"\n # g_attr[\"friction\"] = \"0 0 0\"\n\n SubElement(node, \"geom\", g_attr)\n else:\n for end in bone.ends:\n g_attr = dict()\n e1 = bone.pos + offset\n e2 = end + offset\n v = e2 - e1\n if np.linalg.norm(v) > 1e-6:\n v /= np.linalg.norm(v)\n e1 += v * 0.02\n e2 -= v * 0.02\n g_attr[\"type\"] = \"capsule\"\n g_attr[\n \"fromto\"] = \"{0:.4f} {1:.4f} {2:.4f} {3:.4f} {4:.4f} {5:.4f}\".format(\n *np.concatenate([e1, e2]))\n else:\n g_attr[\"type\"] = \"sphere\"\n g_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*bone.pos)\n g_attr[\"size\"] = \"0.0300\" if self.simple_geom else \"0.0100\"\n if not self.simple_geom:\n g_attr[\"contype\"] = \"0\"\n g_attr[\"conaffinity\"] = \"0\"\n elif bone.name in self.collision_groups.keys():\n group = str(self.collision_groups[bone.name])\n g_attr[\"contype\"] = group\n g_attr[\"conaffinity\"] = group\n SubElement(node, \"geom\", g_attr)\n\n # write child bones\n for bone_c in bone.child:\n self.write_xml_bodynode(bone_c, node, offset, ref_angles)" }, { "identifier": "SMPL_Parser", "path": "uhc/smpllib/smpl_parser.py", "snippet": "class SMPL_Parser(_SMPL):\n def __init__(self, *args, **kwargs):\n \"\"\"SMPL model constructor\n Parameters\n ----------\n model_path: str\n The path to the folder or to the file where the model\n parameters are stored\n data_struct: Strct\n A struct object. 
If given, then the parameters of the model are\n read from the object. Otherwise, the model tries to read the\n parameters from the given `model_path`. (default = None)\n create_global_orient: bool, optional\n Flag for creating a member variable for the global orientation\n of the body. (default = True)\n global_orient: torch.tensor, optional, Bx3\n The default value for the global orientation variable.\n (default = None)\n create_body_pose: bool, optional\n Flag for creating a member variable for the pose of the body.\n (default = True)\n body_pose: torch.tensor, optional, Bx(Body Joints * 3)\n The default value for the body pose variable.\n (default = None)\n create_betas: bool, optional\n Flag for creating a member variable for the shape space\n (default = True).\n betas: torch.tensor, optional, Bx10\n The default value for the shape member variable.\n (default = None)\n create_transl: bool, optional\n Flag for creating a member variable for the translation\n of the body. (default = True)\n transl: torch.tensor, optional, Bx3\n The default value for the transl variable.\n (default = None)\n dtype: torch.dtype, optional\n The data type for the created variables\n batch_size: int, optional\n The batch size used for creating the member variables\n joint_mapper: object, optional\n An object that re-maps the joints. Useful if one wants to\n re-order the SMPL joints to some other convention (e.g. MSCOCO)\n (default = None)\n gender: str, optional\n Which gender to load\n vertex_ids: dict, optional\n A dictionary containing the indices of the extra vertices that\n will be selected\n \"\"\"\n super(SMPL_Parser, self).__init__(*args, **kwargs)\n self.device = next(self.parameters()).device\n self.joint_names = SMPL_BONE_ORDER_NAMES\n\n self.joint_axes = {x: np.identity(3) for x in self.joint_names}\n self.joint_dofs = {x: [\"x\", \"y\", \"z\"] for x in self.joint_names}\n self.joint_range = {\n x: np.hstack([np.ones([3, 1]) * -np.pi, np.ones([3, 1]) * np.pi])\n for x in self.joint_names\n }\n self.joint_range[\"L_Elbow\"] *= 4\n self.joint_range[\"R_Elbow\"] *= 4\n\n self.contype = {1: self.joint_names}\n self.conaffinity = {1: self.joint_names}\n\n # self.contype = {\n # 3: ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee','R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Neck', 'Head','L_Thorax', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Elbow', 'R_Wrist', 'R_Hand'],\n # 1: ['Chest', \"L_Shoulder\", \"R_Shoulder\"]\n # }\n\n # self.conaffinity = {\n # 1: ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee','R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Neck', 'Head','L_Thorax', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Elbow', 'R_Wrist', 'R_Hand'],\n # 3: ['Chest', \"L_Shoulder\", \"R_Shoulder\"]\n # }\n\n self.zero_pose = torch.zeros(1, 72).float()\n\n self.register_buffer('children_map', self._parents_to_children(self.parents))\n\n def _parents_to_children(self, parents):\n self.SPINE3_IDX = 9\n children = torch.ones_like(parents) * -1\n for i in range(24):\n if parents[i] != -1 and children[parents[i]] < 0:\n children[parents[i]] = i\n\n children[self.SPINE3_IDX] = -3\n children[0] = 3\n children[self.SPINE3_IDX] = SMPL_BONE_ORDER_NAMES.index('Neck')\n return children\n\n def forward(self, *args, **kwargs):\n smpl_output = super(SMPL_Parser, self).forward(*args, **kwargs)\n return smpl_output\n\n def get_joints_verts(self, pose, th_betas=None, th_trans=None, root_trans=None, root_scale=None):\n \"\"\"\n Pose should be batch_size x 72\n \"\"\"\n if pose.shape[1] != 
72:\n pose = pose.reshape(-1, 72)\n\n pose = pose.float()\n if th_betas is not None:\n th_betas = th_betas.float()\n\n if th_betas.shape[-1] == 16:\n th_betas = th_betas[:, :10]\n\n batch_size = pose.shape[0]\n \n smpl_output = self.forward(\n betas=th_betas,\n transl=th_trans,\n body_pose=pose[:, 3:],\n global_orient=pose[:, :3],\n )\n vertices = smpl_output.vertices\n joints = smpl_output.joints[:, :24]\n # joints = smpl_output.joints[:,JOINST_TO_USE]\n if root_trans is not None:\n if root_scale is None:\n root_scale = torch.ones_like(root_trans[:, 0])\n cur_root_trans = joints[:, [0], :]\n vertices[:] = (vertices - cur_root_trans) * root_scale[:, None, None] + root_trans[:, None, :]\n joints[:] = (joints - cur_root_trans) * root_scale[:, None, None] + root_trans[:, None, :]\n return vertices, joints\n\n def get_offsets(self, betas=torch.zeros(1, 10).float()):\n with torch.no_grad():\n verts, jts = self.get_joints_verts(self.zero_pose, th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n jts_np = jts.detach().cpu().numpy()\n parents = self.parents.cpu().numpy()\n offsets_smpl = [np.array([0, 0, 0])]\n for i in range(1, len(parents)):\n p_id = parents[i]\n p3d = jts_np[0, p_id]\n curr_3d = jts_np[0, i]\n offset_curr = curr_3d - p3d\n offsets_smpl.append(offset_curr)\n offsets_smpl = np.array(offsets_smpl)\n names_smpl = self.joint_names\n offset_smpl_dict = {\n names_smpl[i]: offsets_smpl[i] for i in range(len(names_smpl))\n }\n parents_dict = {\n names_smpl[i]: names_smpl[parents[i]] for i in range(len(names_smpl))\n }\n channels = [\"z\", \"y\", \"x\"]\n return offset_smpl_dict, parents_dict, channels, self.joint_range\n\n def get_mesh_offsets(self, zero_pose=None, betas=torch.zeros(1, 10), scale=None, flatfoot=False):\n with torch.no_grad():\n joint_names = self.joint_names\n if zero_pose is None:\n verts, Jtr = self.get_joints_verts(self.zero_pose, th_betas=betas)\n else:\n verts, Jtr = self.get_joints_verts(zero_pose, th_betas=betas)\n\n verts_np = verts.detach().cpu().numpy()\n verts = verts_np[0]\n if scale is not None:\n verts *= scale\n\n if flatfoot:\n feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01\n verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])\n\n smpl_joint_parents = self.parents.cpu().numpy()\n\n joint_pos = Jtr[0].numpy()\n if scale is not None:\n joint_pos *= scale\n joint_offsets = {\n joint_names[c]: (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c]\n for c, p in enumerate(smpl_joint_parents)\n }\n joint_parents = {\n x: joint_names[i] if i >= 0 else None\n for x, i in zip(joint_names, smpl_joint_parents)\n }\n\n # skin_weights = smpl_layer.th_weights.numpy()\n skin_weights = self.lbs_weights.numpy()\n return (\n verts,\n joint_pos,\n skin_weights,\n joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )\n\n def get_mesh_offsets_batch(self, betas=torch.zeros(1, 10), flatfoot=False):\n with torch.no_grad():\n joint_names = self.joint_names\n verts, Jtr = self.get_joints_verts(self.zero_pose.repeat(betas.shape[0], 1), th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n verts = verts_np[0]\n\n if flatfoot:\n feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01\n verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])\n\n smpl_joint_parents = self.parents.cpu().numpy()\n\n joint_pos = Jtr\n joint_offsets = {\n joint_names[c]: (joint_pos[:, c] - joint_pos[:, p]) if c > 0 else joint_pos[:, c]\n for c, p in enumerate(smpl_joint_parents)\n }\n 
joint_parents = {\n x: joint_names[i] if i >= 0 else None\n for x, i in zip(joint_names, smpl_joint_parents)\n }\n\n skin_weights = self.lbs_weights\n return (\n verts,\n joint_pos,\n skin_weights,\n joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )" }, { "identifier": "SMPLH_Parser", "path": "uhc/smpllib/smpl_parser.py", "snippet": "class SMPLH_Parser(_SMPLH):\n def __init__(self, *args, **kwargs):\n super(SMPLH_Parser, self).__init__(*args, **kwargs)\n self.device = next(self.parameters()).device\n self.joint_names = SMPLH_BONE_ORDER_NAMES\n self.joint_axes = {x: np.identity(3) for x in self.joint_names}\n self.joint_dofs = {x: [\"z\", \"y\", \"x\"] for x in self.joint_names}\n self.joint_range = {\n x: np.hstack([np.ones([3, 1]) * -np.pi, np.ones([3, 1]) * np.pi])\n for x in self.joint_names\n }\n self.joint_range[\"L_Elbow\"] *= 4\n self.joint_range[\"R_Elbow\"] *= 4\n # import ipdb\n # ipdb.set_trace()\n\n self.contype = {1: self.joint_names}\n self.conaffinity = {1: self.joint_names}\n self.zero_pose = torch.zeros(1, 156).float()\n\n def forward(self, *args, **kwargs):\n smpl_output = super(SMPLH_Parser, self).forward(*args, **kwargs)\n return smpl_output\n\n def get_joints_verts(self, pose, th_betas=None, th_trans=None):\n \"\"\"\n Pose should be batch_size x 156\n \"\"\"\n\n if pose.shape[1] != 156:\n pose = pose.reshape(-1, 156)\n pose = pose.float()\n if th_betas is not None:\n th_betas = th_betas.float()\n\n batch_size = pose.shape[0]\n smpl_output = self.forward(\n body_pose=pose[:, 3:66],\n global_orient=pose[:, :3],\n L_hand_pose=pose[:, 66:111],\n R_hand_pose=pose[:, 111:156],\n betas=th_betas,\n transl=th_trans,\n )\n vertices = smpl_output.vertices\n joints = smpl_output.joints\n # joints = smpl_output.joints[:,JOINST_TO_USE]\n return vertices, joints\n\n def get_offsets(self, betas=torch.zeros(1, 16).float()):\n with torch.no_grad():\n verts, jts = self.get_joints_verts(self.zero_pose, th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n jts_np = jts.detach().cpu().numpy()\n\n parents = self.parents.cpu().numpy()\n offsets_smpl = [np.array([0, 0, 0])]\n for i in range(1, len(parents)):\n p_id = parents[i]\n p3d = jts_np[0, p_id]\n curr_3d = jts_np[0, i]\n offset_curr = curr_3d - p3d\n offsets_smpl.append(offset_curr)\n offsets_smpl = np.array(offsets_smpl)\n names_smpl = self.joint_names\n offset_smpl_dict = {\n names_smpl[i]: offsets_smpl[i] for i in range(len(names_smpl))\n }\n parents_dict = {\n names_smpl[i]: names_smpl[parents[i]] for i in range(len(names_smpl))\n }\n parents_dict[\"Hips\"] = \"None\"\n channels = [\"z\", \"y\", \"x\"]\n return offset_smpl_dict, parents_dict, channels\n\n def get_mesh_offsets(self, betas=torch.zeros(1, 16), flatfoot = False):\n with torch.no_grad():\n joint_names = self.joint_names\n verts, Jtr = self.get_joints_verts(self.zero_pose, th_betas=betas)\n\n verts_np = verts.detach().cpu().numpy()\n verts = verts_np[0]\n\n if flatfoot:\n feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01\n verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])\n\n smpl_joint_parents = self.parents.cpu().numpy()\n joint_pos = Jtr[0].numpy()\n joint_offsets = {\n joint_names[c]: (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c]\n for c, p in enumerate(smpl_joint_parents)\n }\n joint_parents = {\n x: joint_names[i] if i >= 0 else None\n for x, i in zip(joint_names, smpl_joint_parents)\n }\n\n # skin_weights = smpl_layer.th_weights.numpy()\n 
skin_weights = self.lbs_weights.numpy()\n return (\n verts,\n joint_pos,\n skin_weights,\n joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )" }, { "identifier": "SMPLX_Parser", "path": "uhc/smpllib/smpl_parser.py", "snippet": "class SMPLX_Parser(_SMPLX):\n def __init__(self, *args, **kwargs):\n super(SMPLX_Parser, self).__init__(*args, **kwargs)\n self.device = next(self.parameters()).device\n self.joint_names = SMPLH_BONE_ORDER_NAMES\n self.joint_axes = {x: np.identity(3) for x in self.joint_names}\n self.joint_dofs = {x: [\"z\", \"y\", \"x\"] for x in self.joint_names}\n self.joint_range = {\n x: np.hstack([np.ones([3, 1]) * -np.pi, np.ones([3, 1]) * np.pi])\n for x in self.joint_names\n }\n self.joint_range[\"L_Elbow\"] *= 4\n self.joint_range[\"R_Elbow\"] *= 4\n # import ipdb\n # ipdb.set_trace()\n\n self.contype = {1: self.joint_names}\n self.conaffinity = {1: self.joint_names}\n self.zero_pose = torch.zeros(1, 156).float()\n self.joint_to_use = [\n SMPLX_BONE_ORDER_NAMES.index(i) for i in SMPLH_BONE_ORDER_NAMES\n ]\n self.parents_to_use = np.concatenate([np.arange(0, 22), np.arange(25, 55)])\n\n def forward(self, *args, **kwargs):\n smpl_output = super(SMPLX_Parser, self).forward(*args, **kwargs)\n return smpl_output\n\n def get_joints_verts(self, pose, th_betas=None, th_trans=None):\n \"\"\"\n Pose should be batch_size x 156\n \"\"\"\n\n if pose.shape[1] != 156:\n pose = pose.reshape(-1, 156)\n pose = pose.float()\n if th_betas is not None:\n th_betas = th_betas.float()\n \n batch_size = pose.shape[0]\n smpl_output = self.forward(\n body_pose=pose[:, 3:66],\n global_orient=pose[:, :3],\n left_hand_pose=pose[:, 66:111],\n right_hand_pose=pose[:, 111:156],\n betas=th_betas,\n transl=th_trans,\n )\n vertices = smpl_output.vertices\n joints = smpl_output.joints\n # return vertices, joints\n return vertices, joints\n\n def get_offsets(self, v_template=None):\n if not v_template is None:\n self.v_template = v_template\n with torch.no_grad():\n verts, jts = self.get_joints_verts(self.zero_pose)\n verts_np = verts.detach().cpu().numpy()\n jts_np = jts.detach().cpu().numpy()\n\n parents = self.parents.cpu().numpy()\n offsets_smpl = [np.array([0, 0, 0])]\n for i in range(1, len(parents)):\n p_id = parents[i]\n p3d = jts_np[0, p_id]\n curr_3d = jts_np[0, i]\n offset_curr = curr_3d - p3d\n offsets_smpl.append(offset_curr)\n offsets_smpl = np.array(offsets_smpl)\n names_smpl = self.joint_names\n offset_smpl_dict = {\n names_smpl[i]: offsets_smpl[i] for i in range(len(names_smpl))\n }\n parents_dict = {\n names_smpl[i]: names_smpl[parents[i]] for i in range(len(names_smpl))\n }\n parents_dict[\"Hips\"] = \"None\"\n channels = [\"z\", \"y\", \"x\"]\n return offset_smpl_dict, parents_dict, channels\n\n def get_mesh_offsets(self, v_template=None):\n if not v_template is None:\n self.v_template = v_template\n with torch.no_grad():\n # joint_names = self.joint_names\n joint_names = SMPLX_BONE_ORDER_NAMES\n verts, Jtr = self.get_joints_verts(self.zero_pose)\n \n\n smpl_joint_parents = self.parents.cpu().numpy()\n joint_pos = Jtr[0].numpy()\n # print(\n # joint_pos.shape,\n # smpl_joint_parents.shape,\n # len(self.parents_to_use),\n # self.parents.cpu().numpy().shape,\n # )\n joint_offsets = {\n joint_names[c]: (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c]\n for c, p in enumerate(smpl_joint_parents)\n if joint_names[c] in self.joint_names\n }\n joint_parents = {\n x: joint_names[i] if i >= 0 else 
None\n for x, i in zip(joint_names, smpl_joint_parents)\n if joint_names[i] in self.joint_names\n }\n\n verts = verts[0].numpy()\n # skin_weights = smpl_layer.th_weights.numpy()\n skin_weights = self.lbs_weights.numpy()[:, self.parents_to_use]\n return (\n verts,\n joint_pos,\n skin_weights,\n self.joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )" }, { "identifier": "quadric_mesh_decimation", "path": "uhc/utils/geom.py", "snippet": "def quadric_mesh_decimation(fname, reduction_rate, verbose=False):\n reader = vtkSTLReader()\n reader.SetFileName(fname)\n reader.Update()\n inputPoly = reader.GetOutput()\n\n decimate = vtkQuadricDecimation()\n decimate.SetInputData(inputPoly)\n decimate.SetTargetReduction(reduction_rate)\n decimate.Update()\n decimatedPoly = vtkPolyData()\n decimatedPoly.ShallowCopy(decimate.GetOutput())\n\n if verbose:\n print(\n f\"Mesh Decimation: (points, faces) goes from ({inputPoly.GetNumberOfPoints(), inputPoly.GetNumberOfPolys()}) \"\n f\"to ({decimatedPoly.GetNumberOfPoints(), decimatedPoly.GetNumberOfPolys()})\"\n )\n\n stlWriter = vtkSTLWriter()\n stlWriter.SetFileName(fname)\n stlWriter.SetFileTypeToBinary()\n stlWriter.SetInputData(decimatedPoly)\n stlWriter.Write()" }, { "identifier": "flags", "path": "uhc/utils/flags.py", "snippet": "class Flags(object):\n def __init__(self, *items):" } ]
import os
import sys
import time
import argparse
import torch
import os.path as osp
import mujoco_py
import numpy as np
import math
import uuid
import atexit
import shutil
from copy import deepcopy
from lxml.etree import XMLParser, parse, Element, SubElement
from lxml import etree
from io import BytesIO
from scipy.spatial import ConvexHull
from stl import mesh
from mujoco_py import load_model_from_path, MjSim, MjViewer
from uhc.khrylib.mocap.skeleton_local import Skeleton
from uhc.khrylib.mocap.skeleton_mesh_local import Skeleton as SkeletonMesh
from uhc.smpllib.smpl_parser import (
    SMPL_Parser,
    SMPLH_Parser,
    SMPLX_Parser,
)
from uhc.utils.geom import quadric_mesh_decimation
from uhc.utils.flags import flags
12,711
sys.path.append(os.getcwd())


def parse_vec(string):
    return np.fromstring(string, sep=" ")


def parse_fromto(string):
    fromto = np.fromstring(string, sep=" ")
    return fromto[:3], fromto[3:]


def normalize_range(value, lb, ub):
    return (value - lb) / (ub - lb) * 2 - 1


def denormalize_range(value, lb, ub):
    return (value + 1) * 0.5 * (ub - lb) + lb


def vec_to_polar(v):
    phi = math.atan2(v[1], v[0])
    theta = math.acos(v[2])
    return np.array([theta, phi])


def polar_to_vec(p):
    v = np.zeros(3)
    v[0] = math.sin(p[0]) * math.cos(p[1])
    v[1] = math.sin(p[0]) * math.sin(p[1])
    v[2] = math.cos(p[0])
    return v


def in_hull(hull, queries):
    tolerance = 1e-3
    if len(queries.shape) == 1:
        queries = queries[
            None,
        ]
    return np.all(
        np.add(np.dot(queries, hull.equations[:, :-1].T), hull.equations[:, -1])
        <= tolerance,
        axis=1,
    )


def get_joint_geometries(
    smpl_verts,
    smpl_jts,
    skin_weights,
    joint_names,
    geom_dir,
    scale_dict={},
    suffix = None,
    verbose=False,
    min_num_vert = 50,
):
    vert_to_joint = skin_weights.argmax(axis=1)
    hull_dict = {}

    # create joint geometries
    os.makedirs(geom_dir, exist_ok=True)
    for jind, jname in enumerate(joint_names):
        vind = np.where(vert_to_joint == jind)[0]
        if len(vind) == 0:
            print(f"{jname} has no vertices!")
            continue
        vert = (smpl_verts[vind] - smpl_jts[jind]) * scale_dict.get(jname, 1)
        hull = ConvexHull(vert)

        norm_verts = vert - smpl_jts[jind]
        norm_hull = ConvexHull(norm_verts)
        hull_dict[jname] = {
            "norm_hull": norm_hull,
            "norm_verts": norm_verts,
            "verts": vert,
            "hull": hull,
        }
        # print(jname, hull.simplices.shape[0])

        center = vert[hull.vertices].mean(axis=0)
        jgeom = mesh.Mesh(np.zeros(hull.simplices.shape[0], dtype=mesh.Mesh.dtype))
        for i, f in enumerate(hull.simplices):
            for j in range(3):
                jgeom.vectors[i][j] = vert[f[j], :]
            # check if the face's normal is facing outward
            normal = np.cross(
                jgeom.vectors[i][1] - jgeom.vectors[i][0],
                jgeom.vectors[i][2] - jgeom.vectors[i][0],
            )
            out_vec = jgeom.vectors[i].mean(axis=0) - center
            if np.dot(normal, out_vec) < 0:
                jgeom.vectors[i] = jgeom.vectors[i][[0, 2, 1]]  # flip the face

        if suffix is None:
            fname = f"{geom_dir}/{jname}.stl"
        else:
            fname = f"{geom_dir}/{jname}_{suffix}.stl"
        jgeom.save(fname)

        # mesh simplification with vtk
        # min_num_vert = 50
        min_num_vert = 50
        cur_num_vert = len(hull.vertices)
        reduction_rate = min(0.9, 1.0 - min_num_vert / cur_num_vert)
sys.path.append(os.getcwd())


def parse_vec(string):
    return np.fromstring(string, sep=" ")


def parse_fromto(string):
    fromto = np.fromstring(string, sep=" ")
    return fromto[:3], fromto[3:]


def normalize_range(value, lb, ub):
    return (value - lb) / (ub - lb) * 2 - 1


def denormalize_range(value, lb, ub):
    return (value + 1) * 0.5 * (ub - lb) + lb


def vec_to_polar(v):
    phi = math.atan2(v[1], v[0])
    theta = math.acos(v[2])
    return np.array([theta, phi])


def polar_to_vec(p):
    v = np.zeros(3)
    v[0] = math.sin(p[0]) * math.cos(p[1])
    v[1] = math.sin(p[0]) * math.sin(p[1])
    v[2] = math.cos(p[0])
    return v


def in_hull(hull, queries):
    tolerance = 1e-3
    if len(queries.shape) == 1:
        queries = queries[
            None,
        ]
    return np.all(
        np.add(np.dot(queries, hull.equations[:, :-1].T), hull.equations[:, -1])
        <= tolerance,
        axis=1,
    )


def get_joint_geometries(
    smpl_verts,
    smpl_jts,
    skin_weights,
    joint_names,
    geom_dir,
    scale_dict={},
    suffix = None,
    verbose=False,
    min_num_vert = 50,
):
    vert_to_joint = skin_weights.argmax(axis=1)
    hull_dict = {}

    # create joint geometries
    os.makedirs(geom_dir, exist_ok=True)
    for jind, jname in enumerate(joint_names):
        vind = np.where(vert_to_joint == jind)[0]
        if len(vind) == 0:
            print(f"{jname} has no vertices!")
            continue
        vert = (smpl_verts[vind] - smpl_jts[jind]) * scale_dict.get(jname, 1)
        hull = ConvexHull(vert)

        norm_verts = vert - smpl_jts[jind]
        norm_hull = ConvexHull(norm_verts)
        hull_dict[jname] = {
            "norm_hull": norm_hull,
            "norm_verts": norm_verts,
            "verts": vert,
            "hull": hull,
        }
        # print(jname, hull.simplices.shape[0])

        center = vert[hull.vertices].mean(axis=0)
        jgeom = mesh.Mesh(np.zeros(hull.simplices.shape[0], dtype=mesh.Mesh.dtype))
        for i, f in enumerate(hull.simplices):
            for j in range(3):
                jgeom.vectors[i][j] = vert[f[j], :]
            # check if the face's normal is facing outward
            normal = np.cross(
                jgeom.vectors[i][1] - jgeom.vectors[i][0],
                jgeom.vectors[i][2] - jgeom.vectors[i][0],
            )
            out_vec = jgeom.vectors[i].mean(axis=0) - center
            if np.dot(normal, out_vec) < 0:
                jgeom.vectors[i] = jgeom.vectors[i][[0, 2, 1]]  # flip the face

        if suffix is None:
            fname = f"{geom_dir}/{jname}.stl"
        else:
            fname = f"{geom_dir}/{jname}_{suffix}.stl"
        jgeom.save(fname)

        # mesh simplification with vtk
        # min_num_vert = 50
        min_num_vert = 50
        cur_num_vert = len(hull.vertices)
        reduction_rate = min(0.9, 1.0 - min_num_vert / cur_num_vert)
quadric_mesh_decimation(fname, reduction_rate, verbose=verbose)
5
2023-10-30 20:43:43+00:00
16k
masked-spacetime-hashing/msth
MSTH/SpaceTimeHashing/trainer.py
[ { "identifier": "ExperimentConfig", "path": "nerfstudio/configs/experiment_config.py", "snippet": "class ExperimentConfig(InstantiateConfig):\n \"\"\"Full config contents for running an experiment. Any experiment types (like training) will be\n subclassed from this, and must have their _target field defined accordingly.\"\"\"\n\n output_dir: Path = Path(\"outputs\")\n \"\"\"relative or absolute output directory to save all checkpoints and logging\"\"\"\n method_name: Optional[str] = None\n \"\"\"Method name. Required to set in python or via cli\"\"\"\n experiment_name: Optional[str] = None\n \"\"\"Experiment name. If None, will automatically be set to dataset name\"\"\"\n timestamp: str = \"{timestamp}\"\n \"\"\"Experiment timestamp.\"\"\"\n machine: MachineConfig = MachineConfig()\n \"\"\"Machine configuration\"\"\"\n logging: LoggingConfig = LoggingConfig()\n \"\"\"Logging configuration\"\"\"\n viewer: ViewerConfig = ViewerConfig()\n \"\"\"Viewer configuration\"\"\"\n pipeline: VanillaPipelineConfig = VanillaPipelineConfig()\n \"\"\"Pipeline configuration\"\"\"\n optimizers: Dict[str, Any] = to_immutable_dict(\n {\n \"fields\": {\n \"optimizer\": OptimizerConfig(),\n \"scheduler\": SchedulerConfig(),\n }\n }\n )\n \"\"\"Dictionary of optimizer groups and their schedulers\"\"\"\n vis: Literal[\"viewer\", \"wandb\", \"tensorboard\", \"viewer+wandb\", \"viewer+tensorboard\"] = \"wandb\"\n \"\"\"Which visualizer to use.\"\"\"\n data: Optional[Path] = None\n \"\"\"Alias for --pipeline.datamanager.data\"\"\"\n relative_model_dir: Path = Path(\"nerfstudio_models/\")\n \"\"\"Relative path to save all checkpoints.\"\"\"\n\n def is_viewer_enabled(self) -> bool:\n \"\"\"Checks if a viewer is enabled.\"\"\"\n return (\"viewer\" == self.vis) | (\"viewer+wandb\" == self.vis) | (\"viewer+tensorboard\" == self.vis)\n\n def is_wandb_enabled(self) -> bool:\n \"\"\"Checks if wandb is enabled.\"\"\"\n return (\"wandb\" == self.vis) | (\"viewer+wandb\" == self.vis)\n\n def is_tensorboard_enabled(self) -> bool:\n \"\"\"Checks if tensorboard is enabled.\"\"\"\n return (\"tensorboard\" == self.vis) | (\"viewer+tensorboard\" == self.vis)\n\n def set_timestamp(self) -> None:\n \"\"\"Dynamically set the experiment timestamp\"\"\"\n if self.timestamp == \"{timestamp}\":\n self.timestamp = datetime.now().strftime(\"%Y-%m-%d_%H%M%S\")\n\n def set_experiment_name(self) -> None:\n \"\"\"Dynamically set the experiment name\"\"\"\n if self.experiment_name is None:\n datapath = self.pipeline.datamanager.data\n if datapath is not None:\n datapath = datapath.parent if datapath.is_file() else datapath\n self.experiment_name = str(datapath.stem)\n else:\n self.experiment_name = \"unnamed\"\n\n def get_base_dir(self) -> Path:\n \"\"\"Retrieve the base directory to set relative paths\"\"\"\n # check the experiment and method names\n assert self.method_name is not None, \"Please set method name in config or via the cli\"\n self.set_experiment_name()\n return Path(f\"{self.output_dir}/{self.experiment_name}/{self.method_name}/{self.timestamp}\")\n\n def get_checkpoint_dir(self) -> Path:\n \"\"\"Retrieve the checkpoint directory\"\"\"\n return Path(self.get_base_dir() / self.relative_model_dir)\n\n def print_to_terminal(self) -> None:\n \"\"\"Helper to pretty print config to terminal\"\"\"\n CONSOLE.rule(\"Config\")\n CONSOLE.print(self)\n CONSOLE.rule(\"\")\n\n def save_config(self) -> None:\n \"\"\"Save config to base directory\"\"\"\n base_dir = self.get_base_dir()\n assert base_dir is not None\n base_dir.mkdir(parents=True, 
exist_ok=True)\n config_yaml_path = base_dir / \"config.yml\"\n CONSOLE.log(f\"Saving config to: {config_yaml_path}\")\n config_yaml_path.write_text(yaml.dump(self), \"utf8\")" }, { "identifier": "TrainingCallback", "path": "nerfstudio/engine/callbacks.py", "snippet": "class TrainingCallback:\n \"\"\"Callback class used during training.\n The function 'func' with 'args' and 'kwargs' will be called every 'update_every_num_iters' training iterations,\n including at iteration 0. The function is called after the training iteration.\n\n Args:\n where_to_run: List of locations for when to run callback (before/after iteration)\n func: The function that will be called.\n update_every_num_iters: How often to call the function `func`.\n iters: Tuple of iteration steps to perform callback\n args: args for the function 'func'.\n kwargs: kwargs for the function 'func'.\n \"\"\"\n\n def __init__(\n self,\n where_to_run: List[TrainingCallbackLocation],\n func: Callable,\n update_every_num_iters: Optional[int] = None,\n iters: Optional[Tuple[int, ...]] = None,\n args: Optional[List] = None,\n kwargs: Optional[Dict] = None,\n ):\n assert (\n \"step\" in signature(func).parameters.keys()\n ), f\"'step: int' must be an argument in the callback function 'func': {func.__name__}\"\n self.where_to_run = where_to_run\n self.update_every_num_iters = update_every_num_iters\n self.iters = iters\n self.func = func\n self.args = args if args is not None else []\n self.kwargs = kwargs if kwargs is not None else {}\n\n def run_callback(self, step: int) -> None:\n \"\"\"Callback to run after training step\n\n Args:\n step: current iteration step\n \"\"\"\n if self.update_every_num_iters is not None:\n if step % self.update_every_num_iters == 0:\n self.func(*self.args, **self.kwargs, step=step)\n elif self.iters is not None:\n if step in self.iters:\n self.func(*self.args, **self.kwargs, step=step)\n\n def run_callback_at_location(self, step: int, location: TrainingCallbackLocation) -> None:\n \"\"\"Runs the callback if it's supposed to be run at the given location.\n\n Args:\n step: current iteration step\n location: when to run callback (before/after iteration)\n \"\"\"\n if location in self.where_to_run:\n self.run_callback(step=step)" }, { "identifier": "TrainingCallbackAttributes", "path": "nerfstudio/engine/callbacks.py", "snippet": "class TrainingCallbackAttributes:\n \"\"\"Attributes that can be used to configure training callbacks.\n The callbacks can be specified in the Dataloader or Model implementations.\n Instead of providing access to the entire Trainer object, we only provide these attributes.\n This should be least prone to errors and fairly clean from a user perspective.\"\"\"\n\n # TODO(ethan): type this without circular imports\n optimizers: Optional[InitVar]\n \"\"\"optimizers for training\"\"\"\n grad_scaler: Optional[InitVar]\n \"\"\"gradient scalers\"\"\"\n pipeline: Optional[InitVar]\n \"\"\"reference to training pipeline\"\"\"" }, { "identifier": "TrainingCallbackLocation", "path": "nerfstudio/engine/callbacks.py", "snippet": "class TrainingCallbackLocation(Enum):\n \"\"\"Enum for specifying where the training callback should be run.\"\"\"\n\n BEFORE_TRAIN_ITERATION = auto()\n AFTER_TRAIN_ITERATION = auto()" }, { "identifier": "Optimizers", "path": "nerfstudio/engine/optimizers.py", "snippet": "class Optimizers:\n \"\"\"A set of optimizers.\n\n Args:\n config: The optimizer configuration object.\n param_groups: A dictionary of parameter groups to optimize.\n \"\"\"\n\n def __init__(self, config: 
Dict[str, Any], param_groups: Dict[str, List[Parameter]]) -> None:\n self.config = config\n self.optimizers = {}\n self.schedulers = {}\n self.parameters = {}\n for param_group_name, params in param_groups.items():\n lr_init = config[param_group_name][\"optimizer\"].lr\n self.optimizers[param_group_name] = config[param_group_name][\"optimizer\"].setup(params=params)\n self.parameters[param_group_name] = params\n if config[param_group_name][\"scheduler\"]:\n self.schedulers[param_group_name] = (\n config[param_group_name][\"scheduler\"]\n .setup()\n .get_scheduler(optimizer=self.optimizers[param_group_name], lr_init=lr_init)\n )\n\n def optimizer_step(self, param_group_name: str) -> None:\n \"\"\"Fetch and step corresponding optimizer.\n\n Args:\n param_group_name: name of optimizer to step forward\n \"\"\"\n self.optimizers[param_group_name].step()\n\n def scheduler_step(self, param_group_name: str) -> None:\n \"\"\"Fetch and step corresponding scheduler.\n\n Args:\n param_group_name: name of scheduler to step forward\n \"\"\"\n if self.config.param_group_name.scheduler: # type: ignore\n self.schedulers[param_group_name].step()\n\n def zero_grad_all(self) -> None:\n \"\"\"Zero the gradients for all optimizer parameters.\"\"\"\n for _, optimizer in self.optimizers.items():\n optimizer.zero_grad()\n\n def optimizer_scaler_step_all(self, grad_scaler: GradScaler) -> None:\n \"\"\"Take an optimizer step using a grad scaler.\n\n Args:\n grad_scaler: GradScaler to use\n \"\"\"\n for param_group, optimizer in self.optimizers.items():\n max_norm = self.config[param_group][\"optimizer\"].max_norm\n if max_norm is not None:\n grad_scaler.unscale_(optimizer)\n torch.nn.utils.clip_grad_norm_(self.parameters[param_group], max_norm)\n grad_scaler.step(optimizer)\n\n def optimizer_step_all(self) -> None:\n \"\"\"Run step for all optimizers.\"\"\"\n for param_group, optimizer in self.optimizers.items():\n # note that they key is the parameter name\n max_norm = self.config[param_group][\"optimizer\"].max_norm\n if max_norm is not None:\n torch.nn.utils.clip_grad_norm_(self.parameters[param_group], max_norm)\n optimizer.step()\n\n def scheduler_step_all(self, step: int) -> None:\n \"\"\"Run step for all schedulers.\n\n Args:\n step: the current step\n \"\"\"\n for param_group_name, scheduler in self.schedulers.items():\n scheduler.step()\n # TODO(ethan): clean this up. 
why is there indexing into a list?\n lr = scheduler.get_last_lr()[0]\n writer.put_scalar(name=f\"learning_rate/{param_group_name}\", scalar=lr, step=step)\n\n def load_optimizers(self, loaded_state: Dict[str, Any]) -> None:\n \"\"\"Helper to load the optimizer state from previous checkpoint\n\n Args:\n loaded_state: the state from the previous checkpoint\n \"\"\"\n for k, v in loaded_state.items():\n self.optimizers[k].load_state_dict(v)" }, { "identifier": "VanillaPipeline", "path": "nerfstudio/pipelines/base_pipeline.py", "snippet": "class VanillaPipeline(Pipeline):\n \"\"\"The pipeline class for the vanilla nerf setup of multiple cameras for one or a few scenes.\n\n config: configuration to instantiate pipeline\n device: location to place model and data\n test_mode:\n 'val': loads train/val datasets into memory\n 'test': loads train/test dataset into memory\n 'inference': does not load any dataset into memory\n world_size: total number of machines available\n local_rank: rank of current machine\n\n Attributes:\n datamanager: The data manager that will be used\n model: The model that will be used\n \"\"\"\n\n def __init__(\n self,\n config: VanillaPipelineConfig,\n device: str,\n test_mode: Literal[\"test\", \"val\", \"inference\"] = \"val\",\n world_size: int = 1,\n local_rank: int = 0,\n ):\n super().__init__()\n self.config = config\n self.test_mode = test_mode\n self.datamanager: VanillaDataManager = config.datamanager.setup(\n device=device, test_mode=test_mode, world_size=world_size, local_rank=local_rank\n )\n self.datamanager.to(device)\n # TODO(ethan): get rid of scene_bounds from the model\n assert self.datamanager.train_dataset is not None, \"Missing input dataset\"\n\n self._model = config.model.setup(\n scene_box=self.datamanager.train_dataset.scene_box,\n num_train_data=len(self.datamanager.train_dataset),\n metadata=self.datamanager.train_dataset.metadata,\n )\n self.model.to(device)\n\n self.world_size = world_size\n if world_size > 1:\n self._model = typing.cast(Model, DDP(self._model, device_ids=[local_rank], find_unused_parameters=True))\n dist.barrier(device_ids=[local_rank])\n\n @property\n def device(self):\n \"\"\"Returns the device that the model is on.\"\"\"\n return self.model.device\n\n @profiler.time_function\n def get_train_loss_dict(self, step: int):\n \"\"\"This function gets your training loss dict. 
This will be responsible for\n getting the next batch of data from the DataManager and interfacing with the\n Model class, feeding the data to the model's forward function.\n\n Args:\n step: current iteration step to update sampler if using DDP (distributed)\n \"\"\"\n ray_bundle, batch = self.datamanager.next_train(step)\n model_outputs = self.model(ray_bundle)\n metrics_dict = self.model.get_metrics_dict(model_outputs, batch)\n\n if self.config.datamanager.camera_optimizer is not None:\n camera_opt_param_group = self.config.datamanager.camera_optimizer.param_group\n if camera_opt_param_group in self.datamanager.get_param_groups():\n # Report the camera optimization metrics\n metrics_dict[\"camera_opt_translation\"] = (\n self.datamanager.get_param_groups()[camera_opt_param_group][0].data[:, :3].norm()\n )\n metrics_dict[\"camera_opt_rotation\"] = (\n self.datamanager.get_param_groups()[camera_opt_param_group][0].data[:, 3:].norm()\n )\n\n loss_dict = self.model.get_loss_dict(model_outputs, batch, metrics_dict)\n\n return model_outputs, loss_dict, metrics_dict\n\n def forward(self):\n \"\"\"Blank forward method\n\n This is an nn.Module, and so requires a forward() method normally, although in our case\n we do not need a forward() method\"\"\"\n raise NotImplementedError\n\n @profiler.time_function\n def get_eval_loss_dict(self, step: int):\n \"\"\"This function gets your evaluation loss dict. It needs to get the data\n from the DataManager and feed it to the model's forward function\n\n Args:\n step: current iteration step\n \"\"\"\n self.eval()\n ray_bundle, batch = self.datamanager.next_eval(step)\n model_outputs = self.model(ray_bundle)\n metrics_dict = self.model.get_metrics_dict(model_outputs, batch)\n loss_dict = self.model.get_loss_dict(model_outputs, batch, metrics_dict)\n self.train()\n return model_outputs, loss_dict, metrics_dict\n\n @profiler.time_function\n def get_eval_image_metrics_and_images(self, step: int):\n \"\"\"This function gets your evaluation loss dict. 
It needs to get the data\n from the DataManager and feed it to the model's forward function\n\n Args:\n step: current iteration step\n \"\"\"\n self.eval()\n image_idx, camera_ray_bundle, batch = self.datamanager.next_eval_image(step)\n # print(camera_ray_bundle.shape)\n # print(batch.keys())\n # print(batch[\"image_idx\"].shape)\n # print(batch[\"image\"].shape)\n outputs = self.model.get_outputs_for_camera_ray_bundle(camera_ray_bundle)\n metrics_dict, images_dict = self.model.get_image_metrics_and_images(outputs, batch)\n assert \"image_idx\" not in metrics_dict\n metrics_dict[\"image_idx\"] = image_idx\n assert \"num_rays\" not in metrics_dict\n metrics_dict[\"num_rays\"] = len(camera_ray_bundle)\n self.train()\n return metrics_dict, images_dict\n\n @profiler.time_function\n def get_average_eval_image_metrics(self, step: Optional[int] = None):\n \"\"\"Iterate over all the images in the eval dataset and get the average.\n\n Returns:\n metrics_dict: dictionary of metrics\n \"\"\"\n self.eval()\n metrics_dict_list = []\n num_images = len(self.datamanager.fixed_indices_eval_dataloader)\n with Progress(\n TextColumn(\"[progress.description]{task.description}\"),\n BarColumn(),\n TimeElapsedColumn(),\n MofNCompleteColumn(),\n transient=True,\n ) as progress:\n task = progress.add_task(\"[green]Evaluating all eval images...\", total=num_images)\n for camera_ray_bundle, batch in self.datamanager.fixed_indices_eval_dataloader:\n # time this the following line\n inner_start = time()\n height, width = camera_ray_bundle.shape\n num_rays = height * width\n outputs = self.model.get_outputs_for_camera_ray_bundle(camera_ray_bundle)\n metrics_dict, _ = self.model.get_image_metrics_and_images(outputs, batch)\n assert \"num_rays_per_sec\" not in metrics_dict\n metrics_dict[\"num_rays_per_sec\"] = num_rays / (time() - inner_start)\n fps_str = \"fps\"\n assert fps_str not in metrics_dict\n metrics_dict[fps_str] = metrics_dict[\"num_rays_per_sec\"] / (height * width)\n metrics_dict_list.append(metrics_dict)\n progress.advance(task)\n # average the metrics list\n metrics_dict = {}\n for key in metrics_dict_list[0].keys():\n metrics_dict[key] = float(\n torch.mean(torch.tensor([metrics_dict[key] for metrics_dict in metrics_dict_list]))\n )\n self.train()\n return metrics_dict\n\n def load_pipeline(self, loaded_state: Dict[str, Any], step: int) -> None:\n \"\"\"Load the checkpoint from the given path\n\n Args:\n loaded_state: pre-trained model state dict\n step: training step of the loaded checkpoint\n \"\"\"\n state = {key.replace(\"module.\", \"\"): value for key, value in loaded_state.items()}\n self._model.update_to_step(step)\n self.load_state_dict(state, strict=True)\n\n def get_training_callbacks(\n self, training_callback_attributes: TrainingCallbackAttributes\n ) -> List[TrainingCallback]:\n \"\"\"Returns the training callbacks from both the Dataloader and the Model.\"\"\"\n datamanager_callbacks = self.datamanager.get_training_callbacks(training_callback_attributes)\n model_callbacks = self.model.get_training_callbacks(training_callback_attributes)\n callbacks = datamanager_callbacks + model_callbacks\n return callbacks\n\n def get_param_groups(self) -> Dict[str, List[Parameter]]:\n \"\"\"Get the param groups for the pipeline.\n\n Returns:\n A list of dictionaries containing the pipeline's param groups.\n \"\"\"\n datamanager_params = self.datamanager.get_param_groups()\n model_params = self.model.get_param_groups()\n # TODO(ethan): assert that key names don't overlap\n return {**datamanager_params, 
**model_params}" }, { "identifier": "profiler", "path": "nerfstudio/utils/profiler.py", "snippet": "CONSOLE = Console(width=120)\nPROFILER = []\ndef time_function(func: Callable) -> Callable:\n def wrapper(*args, **kwargs):\ndef flush_profiler(config: cfg.LoggingConfig):\ndef setup_profiler(config: cfg.LoggingConfig):\n def __init__(self, config: cfg.LoggingConfig):\n def update_time(self, func_name: str, start_time: float, end_time: float):\n def print_profile(self):\nclass Profiler:" }, { "identifier": "writer", "path": "nerfstudio/utils/writer.py", "snippet": "CONSOLE = Console(width=120)\nEVENT_WRITERS = []\nEVENT_STORAGE = []\nGLOBAL_BUFFER = {}\n ITER_TRAIN_TIME = \"Train Iter (time)\"\n TOTAL_TRAIN_TIME = \"Train Total (time)\"\n ITER_VIS_TIME = \"Viewer Rendering (time)\"\n ETA = \"ETA (time)\"\n TRAIN_RAYS_PER_SEC = \"Train Rays / Sec\"\n TEST_RAYS_PER_SEC = \"Test Rays / Sec\"\n VIS_RAYS_PER_SEC = \"Vis Rays / Sec\"\n CURR_TEST_PSNR = \"Test PSNR\"\n TRAIN_LOSS = \"Train Loss\"\n TRAIN_PSNR = \"Train PSNR\"\n STATIC_TOTAL_TRAIN_TIME = \"Static Train (time)\"\n DYNAMIC_TOTAL_TRAIN_TIME = \"Dynamic Train (time)\"\n STATIC_ITER_TRAIN_TIME = \"Static Train Iter (time)\"\n DYNAMIC_ITER_TRAIN_TIME = \"Dynamic Train Iter (time)\"\n IMAGE = \"write_image\"\n SCALAR = \"write_scalar\"\n DICT = \"write_scalar_dict\"\n CONFIG = \"write_config\"\nclass EventName(enum.Enum):\nclass EventType(enum.Enum):\nclass Writer:\nclass TimeWriter:\nclass WandbWriter(Writer):\nclass TensorboardWriter(Writer):\nclass LocalWriter:\ndef put_image(name, image: TensorType[\"H\", \"W\", \"C\"], step: int):\ndef put_scalar(name: str, scalar: Any, step: int):\ndef put_dict(name: str, scalar_dict: Dict[str, Any], step: int):\ndef put_config(name: str, config_dict: Dict[str, Any], step: int):\ndef put_time(name: str, duration: float, step: int, avg_over_steps: bool = True, update_eta: bool = False):\ndef write_out_storage():\ndef setup_local_writer(config: cfg.LoggingConfig, max_iter: int, banner_messages: Optional[List[str]] = None) -> None:\ndef setup_event_writer(is_wandb_enabled: bool, is_tensorboard_enabled: bool, log_dir: Path, name: str) -> None:\n def write_image(self, name: str, image: TensorType[\"H\", \"W\", \"C\"], step: int) -> None:\n def write_scalar(self, name: str, scalar: Union[float, torch.Tensor], step: int) -> None:\n def write_scalar_dict(self, name: str, scalar_dict: Dict[str, Any], step: int) -> None:\n def __init__(self, writer, name, step=None, write=True):\n def __enter__(self):\n def __exit__(self, *args):\n def __init__(self, log_dir: Path, name: str = None):\n def write_image(self, name: str, image: TensorType[\"H\", \"W\", \"C\"], step: int) -> None:\n def write_scalar(self, name: str, scalar: Union[float, torch.Tensor], step: int) -> None:\n def write_config(self, name: str, config_dict: Dict[str, Any], step: int):\n def __init__(self, log_dir: Path):\n def write_image(self, name: str, image: TensorType[\"H\", \"W\", \"C\"], step: int) -> None:\n def write_scalar(self, name: str, scalar: Union[float, torch.Tensor], step: int) -> None:\n def write_config(self, name: str, config_dict: Dict[str, Any], step: int): # pylint: disable=unused-argument\ndef _cursorup(x: int):\ndef _format_time(seconds):\n def __init__(self, config: cfg.LocalWriterConfig, banner_messages: Optional[List[str]] = None):\n def write_stats_log(self, step: int) -> None:\n def write_config(self, name: str, config_dict: Dict[str, Any], step: int):\n def _consolidate_events(self):\n def _update_header(self, 
latest_map, new_key):\n def _print_stats(self, latest_map, padding=\" \"):" }, { "identifier": "check_eval_enabled", "path": "nerfstudio/utils/decorators.py", "snippet": "def check_eval_enabled(func: Callable) -> Callable:\n \"\"\"Decorator: check if evaluation step is enabled\"\"\"\n\n def wrapper(self, *args, **kwargs):\n ret = None\n if self.config.is_wandb_enabled() or self.config.is_tensorboard_enabled():\n ret = func(self, *args, **kwargs)\n return ret\n\n return wrapper" }, { "identifier": "check_main_thread", "path": "nerfstudio/utils/decorators.py", "snippet": "def check_main_thread(func: Callable) -> Callable:\n \"\"\"Decorator: check if you are on main thread\"\"\"\n\n def wrapper(*args, **kwargs):\n ret = None\n if comms.is_main_process():\n ret = func(*args, **kwargs)\n return ret\n\n return wrapper" }, { "identifier": "check_viewer_enabled", "path": "nerfstudio/utils/decorators.py", "snippet": "def check_viewer_enabled(func: Callable) -> Callable:\n \"\"\"Decorator: check if viewer is enabled and only run on main process\"\"\"\n\n def wrapper(self, *args, **kwargs):\n ret = None\n if self.config.is_viewer_enabled() and comms.is_main_process():\n ret = func(self, *args, **kwargs)\n return ret\n\n return wrapper" }, { "identifier": "step_check", "path": "nerfstudio/utils/misc.py", "snippet": "def step_check(step, step_size, run_at_zero=False) -> bool:\n \"\"\"Returns true based on current step and step interval.\"\"\"\n if step_size == 0:\n return False\n return (run_at_zero or step != 0) and step % step_size == 0" }, { "identifier": "EventName", "path": "nerfstudio/utils/writer.py", "snippet": "class EventName(enum.Enum):\n \"\"\"Names of possible events that can be logged via Local Writer for convenience.\n see config/logging/default_logging.yaml\"\"\"\n\n ITER_TRAIN_TIME = \"Train Iter (time)\"\n TOTAL_TRAIN_TIME = \"Train Total (time)\"\n ITER_VIS_TIME = \"Viewer Rendering (time)\"\n ETA = \"ETA (time)\"\n TRAIN_RAYS_PER_SEC = \"Train Rays / Sec\"\n TEST_RAYS_PER_SEC = \"Test Rays / Sec\"\n VIS_RAYS_PER_SEC = \"Vis Rays / Sec\"\n CURR_TEST_PSNR = \"Test PSNR\"\n TRAIN_LOSS = \"Train Loss\"\n TRAIN_PSNR = \"Train PSNR\"\n STATIC_TOTAL_TRAIN_TIME = \"Static Train (time)\"\n DYNAMIC_TOTAL_TRAIN_TIME = \"Dynamic Train (time)\"\n STATIC_ITER_TRAIN_TIME = \"Static Train Iter (time)\"\n DYNAMIC_ITER_TRAIN_TIME = \"Dynamic Train Iter (time)\"" }, { "identifier": "TimeWriter", "path": "nerfstudio/utils/writer.py", "snippet": "class TimeWriter:\n \"\"\"Timer context manager that calculates duration around wrapped functions\"\"\"\n\n def __init__(self, writer, name, step=None, write=True):\n self.writer = writer\n self.name = name\n self.step = step\n self.write = write\n\n self.start: float = 0.0\n self.duration: float = 0.0\n\n def __enter__(self):\n self.start = time()\n return self\n\n def __exit__(self, *args):\n self.duration = time() - self.start\n update_step = self.step is not None\n if self.write:\n self.writer.put_time(\n name=self.name,\n duration=self.duration,\n step=self.step if update_step else GLOBAL_BUFFER[\"max_iter\"],\n avg_over_steps=update_step,\n update_eta=self.name == EventName.ITER_TRAIN_TIME,\n )" }, { "identifier": "viewer_utils", "path": "nerfstudio/viewer/server/viewer_utils.py", "snippet": "CONSOLE = Console(width=120)\n INIT = \"init\"\n RGB = \"rgb\"\n RGB_FINE = \"rgb_fine\"\n ACCUMULATION = \"accumulation\"\n ACCUMULATION_FINE = \"accumulation_fine\"\n DEFAULT = \"default\"\n TURBO = \"turbo\"\n VIRIDIS = \"viridis\"\n MAGMA = \"magma\"\n INFERNO = 
\"inferno\"\n CIVIDIS = \"cividis\"\ndef get_viewer_version() -> str:\ndef setup_viewer(config: cfg.ViewerConfig, log_filename: Path, datapath: Path):\n def __init__(self, func):\n def __enter__(self):\n def __exit__(self, ext_type, exc_value, traceback):\n def __init__(self, state: \"ViewerState\", graph: Model, camera_ray_bundle: RayBundle):\n def run(self):\n def join(self, timeout=None):\n def __init__(self, state):\n def run(self):\n def __init__(self, config: cfg.ViewerConfig, log_filename: Path, datapath: Path):\n def _pick_drawn_image_idxs(self, total_num: int) -> list[int]:\n def init_scene(self, dataset: InputDataset, start_train=True) -> None:\n def _check_camera_path_payload(self, trainer, step: int):\n def _check_populate_paths_payload(self, trainer, step: int):\n def _check_webrtc_offer(self):\n def loop_in_thread(loop):\n def _update_render_aabb(self, graph):\n def update_scene(self, trainer, step: int, graph: Model, num_rays_per_batch: int) -> None:\n def check_interrupt(self, frame, event, arg): # pylint: disable=unused-argument\n def _get_camera_object(self):\n def _apply_colormap(self, outputs: Dict[str, Any], colors: torch.Tensor = None, eps=1e-6):\n async def send_webrtc_answer(self, data):\n def set_image(self, image):\n def _send_output_to_viewer(self, outputs: Dict[str, Any], colors: torch.Tensor = None):\n def _update_viewer_stats(self, render_time: float, num_rays: int, image_height: int, image_width: int) -> None:\n def _calculate_image_res(self, camera_object, is_training: bool) -> Optional[Tuple[int, int]]:\n def _process_invalid_output(self, output_type: str) -> str:\n def _render_image_in_viewer(self, camera_object, graph: Model, is_training: bool) -> None:\n def _calculate_rendering_fps(self, camera_object, is_training: bool):\nclass OutputTypes(str, enum.Enum):\nclass ColormapTypes(str, enum.Enum):\nclass IOChangeException(Exception):\nclass SetTrace:\nclass RenderThread(threading.Thread):\nclass CheckThread(threading.Thread):\nclass ViewerState:" }, { "identifier": "Timer", "path": "MSTH/utils.py", "snippet": "class Timer:\n recorder = defaultdict(list)\n\n def __init__(self, des=\"\", verbose=True, record=False) -> None:\n self.des = des\n self.verbose = verbose\n self.record = record\n\n def __enter__(self):\n return self\n self.start = time.time()\n self.start_cuda = torch.cuda.Event(enable_timing=True)\n self.end_cuda = torch.cuda.Event(enable_timing=True)\n self.start_cuda.record()\n return self\n\n def __exit__(self, *args):\n return\n self.end = time.time()\n self.end_cuda.record()\n self.interval = self.end - self.start\n if self.verbose:\n torch.cuda.synchronize()\n print(f\"[cudasync]{self.des} consuming {self.start_cuda.elapsed_time(self.end_cuda)/1000.:.8f}\")\n\n print(f\"{self.des} consuming {self.interval:.8f}\")\n if self.record:\n Timer.recorder[self.des].append(self.interval)\n\n @staticmethod\n def show_recorder():\n pprint(Timer.recorder)" }, { "identifier": "VideoPipeline", "path": "MSTH/video_pipeline.py", "snippet": "def module_wrapper(ddp_or_model: Union[DDP, Model]) -> Model:\n def __init__(\n self,\n config: VanillaPipelineConfig,\n device: str,\n test_mode: Literal[\"test\", \"val\", \"inference\"] = \"val\",\n world_size: int = 1,\n local_rank: int = 0,\n ):\n def device(self):\n def get_static_train_loss_dict(self, step: int):\n def get_dynamic_train_loss_dict(self, step: int):\n def hash_reinitialize(self, step: int, std: float):\n def set_static(self, step: int):\n def get_static_eval_loss_dict(self, step: int):\n def 
get_dynamic_eval_loss_dict(self, step: int):\n def get_eval_image_metrics_and_images(self, step: int):\n def get_cur_frame_eval_mask(self):\n def get_average_eval_image_metrics(self, step: Optional[int] = None):\n def load_pipeline(self, loaded_state: Dict[str, Any], step: int) -> None:\n def get_training_callbacks(\n self, training_callback_attributes: TrainingCallbackAttributes\n ) -> List[TrainingCallback]:\n def get_param_groups(self) -> Dict[str, List[Parameter]]:\n def tick(self):\n def cur_frame(self):\n def num_dynamic_rays(self):\n def num_static_rays(self):\n def get_eval_last_frame(self):\n def get_metric(self, image, rgb):\n def __init__(\n self,\n config: SpaceTimePipelineConfig,\n device: str,\n test_mode: Literal[\"test\", \"val\", \"inference\"] = \"val\",\n world_size: int = 1,\n local_rank: int = 0,\n ):\n def device(self):\n def get_train_loss_dict(self, step: int):\n def get_eval_loss_dict(self, step: int):\n def get_eval_image_metrics_and_images(self, step: int, interval=10, use_fast=False):\n def get_eval_image_metrics_and_images_fast(self, step: int, interval=10, thresh=0.9):\n def get_average_eval_image_metrics(self, step: Optional[int] = None):\n def get_eval_video(self, num_frames=None):\n def render_from_cameras(\n self,\n near=1.0,\n far=5.0,\n num_frames=None,\n cameras=None,\n save_path=None,\n fps=None,\n offset=None,\n render_depth=True,\n ):\n def get_param_groups(self) -> Dict[str, List[Parameter]]:\n def get_training_callbacks(\n self, training_callback_attributes: TrainingCallbackAttributes\n ) -> List[TrainingCallback]:\n def mock_eval(self):\n def load_pipeline(self, loaded_state: Dict[str, Any], step: int) -> None:\nclass VideoPipelineConfig(cfg.InstantiateConfig):\nclass VideoPipeline(Pipeline):\nclass SpaceTimePipelineConfig(cfg.InstantiateConfig):\nclass SpaceTimePipeline(Pipeline):" }, { "identifier": "Trainer", "path": "nerfstudio/engine/trainer.py", "snippet": "class Trainer:\n \"\"\"Trainer class\n\n Args:\n config: The configuration object.\n local_rank: Local rank of the process.\n world_size: World size of the process.\n\n Attributes:\n config: The configuration object.\n local_rank: Local rank of the process.\n world_size: World size of the process.\n device: The device to run the training on.\n pipeline: The pipeline object.\n optimizers: The optimizers object.\n callbacks: The callbacks object.\n \"\"\"\n\n pipeline: VanillaPipeline\n optimizers: Optimizers\n callbacks: List[TrainingCallback]\n\n def __init__(self, config: TrainerConfig, local_rank: int = 0, world_size: int = 1) -> None:\n self.config = config\n self.local_rank = local_rank\n self.world_size = world_size\n self.device: TORCH_DEVICE = \"cpu\" if world_size == 0 else f\"cuda:{local_rank}\"\n self.mixed_precision: bool = self.config.mixed_precision\n if self.device == \"cpu\":\n self.mixed_precision = False\n CONSOLE.print(\"Mixed precision is disabled for CPU training.\")\n self._start_step: int = 0\n # optimizers\n self.grad_scaler = GradScaler(enabled=self.mixed_precision)\n\n self.base_dir: Path = config.get_base_dir()\n # directory to save checkpoints\n self.checkpoint_dir: Path = config.get_checkpoint_dir()\n CONSOLE.log(f\"Saving checkpoints to: {self.checkpoint_dir}\")\n\n self.viewer_state = None\n\n def setup(self, test_mode: Literal[\"test\", \"val\", \"inference\"] = \"val\") -> None:\n \"\"\"Setup the Trainer by calling other setup functions.\n\n Args:\n test_mode:\n 'val': loads train/val datasets into memory\n 'test': loads train/test datasets into memory\n 
'inference': does not load any dataset into memory\n \"\"\"\n self.pipeline = self.config.pipeline.setup(\n device=self.device, test_mode=test_mode, world_size=self.world_size, local_rank=self.local_rank\n )\n self.optimizers = self.setup_optimizers()\n\n self._load_checkpoint()\n\n self.callbacks = self.pipeline.get_training_callbacks(\n TrainingCallbackAttributes(\n optimizers=self.optimizers, # type: ignore\n grad_scaler=self.grad_scaler, # type: ignore\n pipeline=self.pipeline, # type: ignore\n )\n )\n\n # set up viewer if enabled\n viewer_log_path = self.base_dir / self.config.viewer.relative_log_filename\n self.viewer_state, banner_messages = None, None\n if self.config.is_viewer_enabled() and self.local_rank == 0:\n datapath = self.pipeline.datamanager.get_datapath()\n if datapath is None:\n datapath = self.base_dir\n self.viewer_state, banner_messages = viewer_utils.setup_viewer(\n self.config.viewer, log_filename=viewer_log_path, datapath=datapath\n )\n self._check_viewer_warnings()\n # set up writers/profilers if enabled\n writer_log_path = self.base_dir / self.config.logging.relative_log_dir\n writer.setup_event_writer(\n self.config.is_wandb_enabled(),\n self.config.is_tensorboard_enabled(),\n log_dir=writer_log_path,\n name=self.config.wandb_name,\n )\n writer.setup_local_writer(\n self.config.logging, max_iter=self.config.max_num_iterations, banner_messages=banner_messages\n )\n writer.put_config(name=\"config\", config_dict=dataclasses.asdict(self.config), step=0)\n profiler.setup_profiler(self.config.logging)\n\n def setup_optimizers(self) -> Optimizers:\n \"\"\"Helper to set up the optimizers\n\n Returns:\n The optimizers object given the trainer config.\n \"\"\"\n optimizer_config = self.config.optimizers.copy()\n param_groups = self.pipeline.get_param_groups()\n camera_optimizer_config = self.config.pipeline.datamanager.camera_optimizer\n if camera_optimizer_config is not None and camera_optimizer_config.mode != \"off\":\n assert camera_optimizer_config.param_group not in optimizer_config\n optimizer_config[camera_optimizer_config.param_group] = {\n \"optimizer\": camera_optimizer_config.optimizer,\n \"scheduler\": camera_optimizer_config.scheduler,\n }\n return Optimizers(optimizer_config, param_groups)\n\n def train(self) -> None:\n \"\"\"Train the model.\"\"\"\n assert self.pipeline.datamanager.train_dataset is not None, \"Missing DatsetInputs\"\n\n self.pipeline.datamanager.train_dataparser_outputs.save_dataparser_transform(\n self.base_dir / \"dataparser_transforms.json\"\n )\n\n self._init_viewer_state()\n with TimeWriter(writer, EventName.TOTAL_TRAIN_TIME):\n num_iterations = self.config.max_num_iterations\n step = 0\n for step in range(self._start_step, self._start_step + num_iterations):\n with TimeWriter(writer, EventName.ITER_TRAIN_TIME, step=step) as train_t:\n self.pipeline.train()\n\n # training callbacks before the training iteration\n for callback in self.callbacks:\n callback.run_callback_at_location(\n step, location=TrainingCallbackLocation.BEFORE_TRAIN_ITERATION\n )\n\n # time the forward pass\n loss, loss_dict, metrics_dict = self.train_iteration(step)\n\n # training callbacks after the training iteration\n for callback in self.callbacks:\n callback.run_callback_at_location(step, location=TrainingCallbackLocation.AFTER_TRAIN_ITERATION)\n\n # Skip the first two steps to avoid skewed timings that break the viewer rendering speed estimate.\n if step > 1:\n writer.put_time(\n name=EventName.TRAIN_RAYS_PER_SEC,\n 
duration=self.pipeline.datamanager.get_train_rays_per_batch() / train_t.duration,\n step=step,\n avg_over_steps=True,\n )\n\n self._update_viewer_state(step)\n\n # a batch of train rays\n if step_check(step, self.config.logging.steps_per_log, run_at_zero=True):\n writer.put_scalar(name=\"Train Loss\", scalar=loss, step=step)\n writer.put_dict(name=\"Train Loss Dict\", scalar_dict=loss_dict, step=step)\n writer.put_dict(name=\"Train Metrics Dict\", scalar_dict=metrics_dict, step=step)\n\n # Do not perform evaluation if there are no validation images\n if self.pipeline.datamanager.eval_dataset:\n self.eval_iteration(step)\n\n if step_check(step, self.config.steps_per_save):\n self.save_checkpoint(step)\n\n writer.write_out_storage()\n\n # save checkpoint at the end of training\n # self.save_checkpoint(step)\n\n # write out any remaining events (e.g., total train time)\n writer.write_out_storage()\n\n CONSOLE.rule()\n CONSOLE.print(\"[bold green]:tada: :tada: :tada: Training Finished :tada: :tada: :tada:\", justify=\"center\")\n if not self.config.viewer.quit_on_train_completion:\n CONSOLE.print(\"Use ctrl+c to quit\", justify=\"center\")\n self._always_render(step)\n\n @check_main_thread\n def _always_render(self, step: int) -> None:\n if self.viewer_state is not None:\n while True:\n self.viewer_state.vis[\"renderingState/isTraining\"].write(False)\n self._update_viewer_state(step)\n\n @check_main_thread\n def _check_viewer_warnings(self) -> None:\n \"\"\"Helper to print out any warnings regarding the way the viewer/loggers are enabled\"\"\"\n if (\n self.config.is_viewer_enabled()\n and not self.config.is_tensorboard_enabled()\n and not self.config.is_wandb_enabled()\n ):\n string: str = (\n \"[NOTE] Not running eval iterations since only viewer is enabled.\\n\"\n \"Use [yellow]--vis {wandb, tensorboard, viewer+wandb, viewer+tensorboard}[/yellow] to run with eval.\"\n )\n CONSOLE.print(f\"{string}\")\n\n @check_viewer_enabled\n def _init_viewer_state(self) -> None:\n \"\"\"Initializes viewer scene with given train dataset\"\"\"\n assert self.viewer_state and self.pipeline.datamanager.train_dataset\n self.viewer_state.init_scene(\n dataset=self.pipeline.datamanager.train_dataset,\n start_train=self.config.viewer.start_train,\n )\n if not self.config.viewer.start_train:\n self._always_render(self._start_step)\n\n @check_viewer_enabled\n def _update_viewer_state(self, step: int) -> None:\n \"\"\"Updates the viewer state by rendering out scene with current pipeline\n Returns the time taken to render scene.\n\n Args:\n step: current train step\n \"\"\"\n assert self.viewer_state is not None\n with TimeWriter(writer, EventName.ITER_VIS_TIME, step=step) as _:\n num_rays_per_batch: int = self.pipeline.datamanager.get_train_rays_per_batch()\n try:\n self.viewer_state.update_scene(self, step, self.pipeline.model, num_rays_per_batch)\n except RuntimeError:\n time.sleep(0.03) # sleep to allow buffer to reset\n assert self.viewer_state.vis is not None\n self.viewer_state.vis[\"renderingState/log_errors\"].write(\n \"Error: GPU out of memory. 
Reduce resolution to prevent viewer from crashing.\"\n )\n\n @check_viewer_enabled\n def _update_viewer_rays_per_sec(self, train_t: TimeWriter, vis_t: TimeWriter, step: int) -> None:\n \"\"\"Performs update on rays/sec calculation for training\n\n Args:\n train_t: timer object carrying time to execute total training iteration\n vis_t: timer object carrying time to execute visualization step\n step: current step\n \"\"\"\n train_num_rays_per_batch: int = self.pipeline.datamanager.get_train_rays_per_batch()\n writer.put_time(\n name=EventName.TRAIN_RAYS_PER_SEC,\n duration=train_num_rays_per_batch / (train_t.duration - vis_t.duration),\n step=step,\n avg_over_steps=True,\n )\n\n def _load_checkpoint(self) -> None:\n \"\"\"Helper function to load pipeline and optimizer from prespecified checkpoint\"\"\"\n load_dir: Path = self.config.load_dir\n if load_dir is not None:\n load_step = self.config.load_step\n if load_step is None:\n print(\"Loading latest checkpoint from load_dir\")\n # NOTE: this is specific to the checkpoint name format\n load_step = sorted(int(x[x.find(\"-\") + 1 : x.find(\".\")]) for x in os.listdir(load_dir))[-1]\n load_path: Path = load_dir / f\"step-{load_step:09d}.ckpt\"\n assert load_path.exists(), f\"Checkpoint {load_path} does not exist\"\n loaded_state = torch.load(load_path, map_location=\"cpu\")\n self._start_step = loaded_state[\"step\"] + 1\n # load the checkpoints for pipeline, optimizers, and gradient scalar\n self.pipeline.load_pipeline(loaded_state[\"pipeline\"], loaded_state[\"step\"])\n self.optimizers.load_optimizers(loaded_state[\"optimizers\"])\n self.grad_scaler.load_state_dict(loaded_state[\"scalers\"])\n CONSOLE.print(f\"done loading checkpoint from {load_path}\")\n else:\n CONSOLE.print(\"No checkpoints to load, training from scratch\")\n\n @check_main_thread\n def save_checkpoint(self, step: int) -> None:\n \"\"\"Save the model and optimizers\n\n Args:\n step: number of steps in training for given checkpoint\n \"\"\"\n # possibly make the checkpoint directory\n if not self.checkpoint_dir.exists():\n self.checkpoint_dir.mkdir(parents=True, exist_ok=True)\n # save the checkpoint\n ckpt_path: Path = self.checkpoint_dir / f\"step-{step:09d}.ckpt\"\n torch.save(\n {\n \"step\": step,\n \"pipeline\": self.pipeline.module.state_dict() # type: ignore\n if hasattr(self.pipeline, \"module\")\n else self.pipeline.state_dict(),\n \"optimizers\": {k: v.state_dict() for (k, v) in self.optimizers.optimizers.items()},\n \"scalers\": self.grad_scaler.state_dict(),\n },\n ckpt_path,\n )\n # possibly delete old checkpoints\n if self.config.save_only_latest_checkpoint:\n # delete everything else in the checkpoint folder\n for f in self.checkpoint_dir.glob(\"*\"):\n if f != ckpt_path:\n f.unlink()\n\n @profiler.time_function\n def train_iteration(self, step: int) -> TRAIN_INTERATION_OUTPUT:\n \"\"\"Run one iteration with a batch of inputs. 
Returns dictionary of model losses.\n\n Args:\n step: Current training step.\n \"\"\"\n self.optimizers.zero_grad_all()\n cpu_or_cuda_str: str = self.device.split(\":\")[0]\n with torch.autocast(device_type=cpu_or_cuda_str, enabled=self.mixed_precision):\n _, loss_dict, metrics_dict = self.pipeline.get_train_loss_dict(step=step)\n loss = functools.reduce(torch.add, loss_dict.values())\n self.grad_scaler.scale(loss).backward() # type: ignore\n self.optimizers.optimizer_scaler_step_all(self.grad_scaler)\n\n if self.config.log_gradients:\n total_grad = 0\n for tag, value in self.pipeline.model.named_parameters():\n assert tag != \"Total\"\n if value.grad is not None:\n grad = value.grad.norm()\n metrics_dict[f\"Gradients/{tag}\"] = grad\n total_grad += grad\n\n metrics_dict[\"Gradients/Total\"] = total_grad\n\n self.grad_scaler.update()\n self.optimizers.scheduler_step_all(step)\n\n # Merging loss and metrics dict into a single output.\n return loss, loss_dict, metrics_dict\n\n @check_eval_enabled\n @profiler.time_function\n def eval_iteration(self, step: int) -> None:\n \"\"\"Run one iteration with different batch/image/all image evaluations depending on step size.\n\n Args:\n step: Current training step.\n \"\"\"\n # a batch of eval rays\n if step_check(step, self.config.steps_per_eval_batch):\n _, eval_loss_dict, eval_metrics_dict = self.pipeline.get_eval_loss_dict(step=step)\n eval_loss = functools.reduce(torch.add, eval_loss_dict.values())\n writer.put_scalar(name=\"Eval Loss\", scalar=eval_loss, step=step)\n writer.put_dict(name=\"Eval Loss Dict\", scalar_dict=eval_loss_dict, step=step)\n writer.put_dict(name=\"Eval Metrics Dict\", scalar_dict=eval_metrics_dict, step=step)\n\n # one eval image\n if step_check(step, self.config.steps_per_eval_image):\n with TimeWriter(writer, EventName.TEST_RAYS_PER_SEC, write=False) as test_t:\n metrics_dict, images_dict = self.pipeline.get_eval_image_metrics_and_images(step=step)\n writer.put_time(\n name=EventName.TEST_RAYS_PER_SEC,\n duration=metrics_dict[\"num_rays\"] / test_t.duration,\n step=step,\n avg_over_steps=True,\n )\n writer.put_dict(name=\"Eval Images Metrics\", scalar_dict=metrics_dict, step=step)\n group = \"Eval Images\"\n for image_name, image in images_dict.items():\n writer.put_image(name=group + \"/\" + image_name, image=image, step=step)\n\n # all eval images\n if step_check(step, self.config.steps_per_eval_all_images):\n metrics_dict = self.pipeline.get_average_eval_image_metrics(step=step)\n writer.put_dict(name=\"Eval Images Metrics Dict (all images)\", scalar_dict=metrics_dict, step=step)" }, { "identifier": "TrainerConfig", "path": "nerfstudio/engine/trainer.py", "snippet": "class TrainerConfig(ExperimentConfig):\n \"\"\"Configuration for training regimen\"\"\"\n\n _target: Type = field(default_factory=lambda: Trainer)\n \"\"\"target class to instantiate\"\"\"\n steps_per_save: int = 1000\n \"\"\"Number of steps between saves.\"\"\"\n steps_per_eval_batch: int = 500\n \"\"\"Number of steps between randomly sampled batches of rays.\"\"\"\n steps_per_eval_image: int = 500\n \"\"\"Number of steps between single eval images.\"\"\"\n steps_per_eval_all_images: int = 25000\n \"\"\"Number of steps between eval all images.\"\"\"\n max_num_iterations: int = 1000000\n \"\"\"Maximum number of iterations to run.\"\"\"\n mixed_precision: bool = False\n \"\"\"Whether or not to use mixed precision for training.\"\"\"\n save_only_latest_checkpoint: bool = True\n \"\"\"Whether to only save the latest checkpoint or all checkpoints.\"\"\"\n # 
optional parameters if we want to resume training\n load_dir: Optional[Path] = None\n \"\"\"Optionally specify a pre-trained model directory to load from.\"\"\"\n load_step: Optional[int] = None\n \"\"\"Optionally specify model step to load from; if none, will find most recent model in load_dir.\"\"\"\n load_config: Optional[Path] = None\n \"\"\"Path to config YAML file.\"\"\"\n log_gradients: bool = False\n \"\"\"Optionally log gradients during training\"\"\"\n\n \"\"\" feng add \n load_pretrain_or_resume:\n resume:\n load all parameters including network, \n pretrain: \n \"\"\"\n load_pretrain_or_resume: str = \"resume\"\n wandb_name: str = \"none\"\n \"\"\" /feng add \"\"\"" } ]
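The Trainer code in the context snippets above saves checkpoints as step-{step:09d}.ckpt and, when resuming without an explicit load_step, picks the newest file by parsing the step out of each filename. A minimal standalone sketch of that convention, assuming the directory contains only such checkpoint files (the helper name latest_checkpoint is illustrative, not part of the source):

from pathlib import Path

def latest_checkpoint(load_dir: Path) -> Path:
    # "step-000002000.ckpt" -> 2000, mirroring the parsing done in _load_checkpoint
    steps = sorted(int(p.name[p.name.find("-") + 1 : p.name.find(".")]) for p in load_dir.iterdir())
    return load_dir / f"step-{steps[-1]:09d}.ckpt"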
import dataclasses import functools import os import time import numpy as np import torch import yappi import wandb from dataclasses import dataclass, field from pathlib import Path from typing import Dict, List, Optional, Tuple, Type, Union from rich.console import Console from torch.cuda.amp.grad_scaler import GradScaler from typing_extensions import Literal from nerfstudio.configs.experiment_config import ExperimentConfig from nerfstudio.engine.callbacks import ( TrainingCallback, TrainingCallbackAttributes, TrainingCallbackLocation, ) from nerfstudio.engine.optimizers import Optimizers from nerfstudio.pipelines.base_pipeline import VanillaPipeline from nerfstudio.utils import profiler, writer from nerfstudio.utils.decorators import ( check_eval_enabled, check_main_thread, check_viewer_enabled, ) from nerfstudio.utils.misc import step_check from nerfstudio.utils.writer import EventName, TimeWriter from nerfstudio.viewer.server import viewer_utils from MSTH.utils import Timer from MSTH.video_pipeline import ( VideoPipeline, VideoPipelineConfig, SpaceTimeDataManagerConfig, SpaceTimePipelineConfig, SpaceTimePipeline, ) from nerfstudio.engine.trainer import Trainer, TrainerConfig
13359
from __future__ import annotations CONSOLE = Console(width=120) TRAIN_INTERATION_OUTPUT = Tuple[ # pylint: disable=invalid-name torch.Tensor, Dict[str, torch.Tensor], Dict[str, torch.Tensor] ] TORCH_DEVICE = Union[torch.device, str] # pylint: disable=invalid-name @dataclass class SpaceTimeHashingTrainerConfig(TrainerConfig): """Configuration for training regimen""" _target: Type = field(default_factory=lambda: SpaceTimeHashingTrainer) pipeline: SpaceTimePipelineConfig """target class to instantiate""" steps_per_save: int = 1000 """Number of steps between saves.""" steps_per_eval_batch: int = 500 """Number of steps between randomly sampled batches of rays.""" steps_per_eval_image: int = 2000 """Number of steps between single eval images.""" steps_per_eval_all_images: int = 25000 """Number of steps between eval all images.""" max_num_iterations: int = 1000000 """Maximum number of iterations to run.""" mixed_precision: bool = False """Whether or not to use mixed precision for training.""" save_only_latest_checkpoint: bool = True """Whether to only save the latest checkpoint or all checkpoints.""" # optional parameters if we want to resume training load_dir: Optional[Path] = None """Optionally specify a pre-trained model directory to load from.""" load_step: Optional[int] = None """Optionally specify model step to load from; if none, will find most recent model in load_dir.""" load_config: Optional[Path] = None """Path to config YAML file.""" log_gradients: bool = False """Optionally log gradients during training""" wandb_name: str = "none" steps_full_video: int = 10000000000 eval_total_frames: Optional[int] = None save_eval_video: bool = False render_camera_offset: Optional[List[float]] = None class SpaceTimeHashingTrainer(Trainer): config: SpaceTimeHashingTrainerConfig pipeline: SpaceTimePipeline optimizers: Optimizers callbacks: List[TrainingCallback] @profiler.time_function def train_iteration(self, step: int) -> TRAIN_INTERATION_OUTPUT: """Run one iteration with a batch of inputs. Returns dictionary of model losses. Args: step: Current training step. """ self.optimizers.zero_grad_all() self.pipeline.train() cpu_or_cuda_str: str = self.device.split(":")[0] with torch.autocast(device_type=cpu_or_cuda_str, enabled=self.mixed_precision): _, loss_dict, metrics_dict = self.pipeline.get_train_loss_dict(step=step) loss = functools.reduce(torch.add, loss_dict.values()) self.grad_scaler.scale(loss).backward() # type: ignore # TODO remove this # self.pipeline.model.field.mlp_base.spatial_net.grad_total_variation(weight=1e-2, B=10000) # print(self.pipeline.get_param_groups()["proposal_networks"][0].grad) self.optimizers.optimizer_scaler_step_all(self.grad_scaler) if self.config.log_gradients: total_grad = 0 for tag, value in self.pipeline.model.named_parameters(): assert tag != "Total" if value.grad is not None: grad = value.grad.norm() metrics_dict[f"Gradients/{tag}"] = grad total_grad += grad metrics_dict["Gradients/Total"] = total_grad self.grad_scaler.update() self.optimizers.scheduler_step_all(step) # Merging loss and metrics dict into a single output. return loss, loss_dict, metrics_dict @check_eval_enabled @profiler.time_function def eval_iteration(self, step: int) -> None: """Run one iteration with different batch/image/all image evaluations depending on step size. Args: step: Current training step. 
""" # a batch of eval rays # if step_check(step, self.config.steps_per_eval_batch): # _, eval_loss_dict, eval_metrics_dict = self.pipeline.get_eval_loss_dict(step=step) # eval_loss = functools.reduce(torch.add, eval_loss_dict.values()) # writer.put_scalar(name="Eval Loss", scalar=eval_loss, step=step) # writer.put_dict(name="Eval Loss Dict", scalar_dict=eval_loss_dict, step=step) # writer.put_dict(name="Eval Metrics Dict", scalar_dict=eval_metrics_dict, step=step) # one eval image
from __future__ import annotations CONSOLE = Console(width=120) TRAIN_INTERATION_OUTPUT = Tuple[ # pylint: disable=invalid-name torch.Tensor, Dict[str, torch.Tensor], Dict[str, torch.Tensor] ] TORCH_DEVICE = Union[torch.device, str] # pylint: disable=invalid-name @dataclass class SpaceTimeHashingTrainerConfig(TrainerConfig): """Configuration for training regimen""" _target: Type = field(default_factory=lambda: SpaceTimeHashingTrainer) pipeline: SpaceTimePipelineConfig """target class to instantiate""" steps_per_save: int = 1000 """Number of steps between saves.""" steps_per_eval_batch: int = 500 """Number of steps between randomly sampled batches of rays.""" steps_per_eval_image: int = 2000 """Number of steps between single eval images.""" steps_per_eval_all_images: int = 25000 """Number of steps between eval all images.""" max_num_iterations: int = 1000000 """Maximum number of iterations to run.""" mixed_precision: bool = False """Whether or not to use mixed precision for training.""" save_only_latest_checkpoint: bool = True """Whether to only save the latest checkpoint or all checkpoints.""" # optional parameters if we want to resume training load_dir: Optional[Path] = None """Optionally specify a pre-trained model directory to load from.""" load_step: Optional[int] = None """Optionally specify model step to load from; if none, will find most recent model in load_dir.""" load_config: Optional[Path] = None """Path to config YAML file.""" log_gradients: bool = False """Optionally log gradients during training""" wandb_name: str = "none" steps_full_video: int = 10000000000 eval_total_frames: Optional[int] = None save_eval_video: bool = False render_camera_offset: Optional[List[float]] = None class SpaceTimeHashingTrainer(Trainer): config: SpaceTimeHashingTrainerConfig pipeline: SpaceTimePipeline optimizers: Optimizers callbacks: List[TrainingCallback] @profiler.time_function def train_iteration(self, step: int) -> TRAIN_INTERATION_OUTPUT: """Run one iteration with a batch of inputs. Returns dictionary of model losses. Args: step: Current training step. """ self.optimizers.zero_grad_all() self.pipeline.train() cpu_or_cuda_str: str = self.device.split(":")[0] with torch.autocast(device_type=cpu_or_cuda_str, enabled=self.mixed_precision): _, loss_dict, metrics_dict = self.pipeline.get_train_loss_dict(step=step) loss = functools.reduce(torch.add, loss_dict.values()) self.grad_scaler.scale(loss).backward() # type: ignore # TODO remove this # self.pipeline.model.field.mlp_base.spatial_net.grad_total_variation(weight=1e-2, B=10000) # print(self.pipeline.get_param_groups()["proposal_networks"][0].grad) self.optimizers.optimizer_scaler_step_all(self.grad_scaler) if self.config.log_gradients: total_grad = 0 for tag, value in self.pipeline.model.named_parameters(): assert tag != "Total" if value.grad is not None: grad = value.grad.norm() metrics_dict[f"Gradients/{tag}"] = grad total_grad += grad metrics_dict["Gradients/Total"] = total_grad self.grad_scaler.update() self.optimizers.scheduler_step_all(step) # Merging loss and metrics dict into a single output. return loss, loss_dict, metrics_dict @check_eval_enabled @profiler.time_function def eval_iteration(self, step: int) -> None: """Run one iteration with different batch/image/all image evaluations depending on step size. Args: step: Current training step. 
""" # a batch of eval rays # if step_check(step, self.config.steps_per_eval_batch): # _, eval_loss_dict, eval_metrics_dict = self.pipeline.get_eval_loss_dict(step=step) # eval_loss = functools.reduce(torch.add, eval_loss_dict.values()) # writer.put_scalar(name="Eval Loss", scalar=eval_loss, step=step) # writer.put_dict(name="Eval Loss Dict", scalar_dict=eval_loss_dict, step=step) # writer.put_dict(name="Eval Metrics Dict", scalar_dict=eval_metrics_dict, step=step) # one eval image
if step_check(step, self.config.steps_per_eval_image):
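The train_iteration shown in cropped_code/all_code above follows the standard PyTorch mixed-precision recipe: zero the gradients, sum the loss dict under torch.autocast, backpropagate through the GradScaler, then step and update the scaler. A condensed, generic sketch of that pattern, using a single plain torch.optim optimizer and GradScaler.step in place of the project's Optimizers wrapper (all names below are placeholders, not objects from this record):

import functools
import torch

def train_step(get_train_loss_dict, optimizer, grad_scaler, step, device="cuda:0", mixed_precision=True):
    optimizer.zero_grad()
    device_type = device.split(":")[0]  # "cuda" or "cpu", as in train_iteration
    with torch.autocast(device_type=device_type, enabled=mixed_precision):
        loss_dict = get_train_loss_dict(step)                   # e.g. the pipeline's train loss dict
        loss = functools.reduce(torch.add, loss_dict.values())  # sum all loss terms
    grad_scaler.scale(loss).backward()   # backward on the scaled loss
    grad_scaler.step(optimizer)          # unscales gradients; skips the step on inf/NaN
    grad_scaler.update()                 # adapts the loss scale for the next iteration
    return loss.detach(), loss_dict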
11
2023-10-26 04:39:15+00:00
16k
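Given the fields of the record above (cropped_code, next_line, gold_snippet_index), a row like this reads naturally as a next-line completion task. A minimal sketch of how such a row might be scored; evaluate_row and generate_next_line are hypothetical names, and exact match is only one reasonable metric:

def evaluate_row(row, generate_next_line):
    prompt = row["cropped_code"]             # file prefix ending right before the target line
    gold = row["next_line"].strip()          # here: "if step_check(step, self.config.steps_per_eval_image):"
    prediction = generate_next_line(prompt)  # placeholder for the model under evaluation
    # row["gold_snippet_index"] (11 in this record) presumably indexes the context snippet
    # holding the ground-truth definition; it is not needed for exact-match scoring.
    return prediction.strip() == gold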
Trustworthy-AI-Group/TransferAttack
transferattack/model_related/ghost.py
[ { "identifier": "Attack", "path": "transferattack/attack.py", "snippet": "class Attack(object):\n \"\"\"\n Base class for all attacks.\n \"\"\"\n def __init__(self, attack, model_name, epsilon, targeted, random_start, norm, loss, device=None):\n \"\"\"\n Initialize the hyperparameters\n\n Arguments:\n attack (str): the name of attack.\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. If it is None, the device would be same as model\n \"\"\"\n if norm not in ['l2', 'linfty']:\n raise Exception(\"Unsupported norm {}\".format(norm))\n self.attack = attack\n self.model = self.load_model(model_name)\n self.epsilon = epsilon\n self.targeted = targeted\n self.random_start = random_start\n self.norm = norm\n if isinstance(self.model, EnsembleModel):\n self.device = self.model.device\n else:\n self.device = next(self.model.parameters()).device if device is None else device\n self.loss = self.loss_function(loss)\n\n def load_model(self, model_name):\n \"\"\"\n The model Loading stage, which should be overridden when surrogate model is customized (e.g., DSM, SETR, etc.)\n Prioritize the model in torchvision.models, then timm.models\n\n Arguments:\n model_name (str/list): the name of surrogate model in model_list in utils.py\n\n Returns:\n model (torch.nn.Module): the surrogate model wrapped by wrap_model in utils.py\n \"\"\"\n def load_single_model(model_name):\n if model_name in models.__dict__.keys():\n print('=> Loading model {} from torchvision.models'.format(model_name))\n model = models.__dict__[model_name](weights=\"DEFAULT\")\n elif model_name in timm.list_models():\n print('=> Loading model {} from timm.models'.format(model_name))\n model = timm.create_model(model_name, pretrained=True)\n else:\n raise ValueError('Model {} not supported'.format(model_name))\n return wrap_model(model.eval().cuda())\n\n if isinstance(model_name, list):\n return EnsembleModel([load_single_model(name) for name in model_name])\n else:\n return load_single_model(model_name)\n\n def forward(self, data, label, **kwargs):\n \"\"\"\n The general attack procedure\n\n Arguments:\n data (N, C, H, W): tensor for input images\n labels (N,): tensor for ground-truth labels if untargetd\n labels (2,N): tensor for [ground-truth, targeted labels] if targeted\n \"\"\"\n if self.targeted:\n assert len(label) == 2\n label = label[1] # the second element is the targeted label tensor\n data = data.clone().detach().to(self.device)\n label = label.clone().detach().to(self.device)\n\n # Initialize adversarial perturbation\n delta = self.init_delta(data)\n\n momentum = 0\n for _ in range(self.epoch):\n # Obtain the output\n logits = self.get_logits(self.transform(data+delta, momentum=momentum))\n\n # Calculate the loss\n loss = self.get_loss(logits, label)\n\n # Calculate the gradients\n grad = self.get_grad(loss, delta)\n\n # Calculate the momentum\n momentum = self.get_momentum(grad, momentum)\n\n # Update adversarial perturbation\n delta = self.update_delta(delta, data, momentum, self.alpha)\n\n return delta.detach()\n\n def get_logits(self, x, **kwargs):\n \"\"\"\n The inference stage, which should be overridden when the attack need to change the models (e.g., ensemble-model attack, ghost, etc.) or the input (e.g. 
DIM, SIM, etc.)\n \"\"\"\n return self.model(x)\n\n def get_loss(self, logits, label):\n \"\"\"\n The loss calculation, which should be overrideen when the attack change the loss calculation (e.g., ATA, etc.)\n \"\"\"\n # Calculate the loss\n return -self.loss(logits, label) if self.targeted else self.loss(logits, label)\n\n\n def get_grad(self, loss, delta, **kwargs):\n \"\"\"\n The gradient calculation, which should be overridden when the attack need to tune the gradient (e.g., TIM, variance tuning, enhanced momentum, etc.)\n \"\"\"\n return torch.autograd.grad(loss, delta, retain_graph=False, create_graph=False)[0]\n\n def get_momentum(self, grad, momentum, **kwargs):\n \"\"\"\n The momentum calculation\n \"\"\"\n return momentum * self.decay + grad / (grad.abs().mean(dim=(1,2,3), keepdim=True))\n\n def init_delta(self, data, **kwargs):\n delta = torch.zeros_like(data).to(self.device)\n if self.random_start:\n if self.norm == 'linfty':\n delta.uniform_(-self.epsilon, self.epsilon)\n else:\n delta.normal_(-self.epsilon, self.epsilon)\n d_flat = delta.view(delta.size(0), -1)\n n = d_flat.norm(p=2, dim=10).view(delta.size(0), 1, 1, 1)\n r = torch.zeros_like(data).uniform_(0,1).to(self.device)\n delta *= r/n*self.epsilon\n delta = clamp(delta, img_min-data, img_max-data)\n delta.requires_grad = True\n return delta\n\n def update_delta(self, delta, data, grad, alpha, **kwargs):\n if self.norm == 'linfty':\n delta = torch.clamp(delta + alpha * grad.sign(), -self.epsilon, self.epsilon)\n else:\n grad_norm = torch.norm(grad.view(grad.size(0), -1), dim=1).view(-1, 1, 1, 1)\n scaled_grad = grad / (grad_norm + 1e-20)\n delta = (delta + scaled_grad * alpha).view(delta.size(0), -1).renorm(p=2, dim=0, maxnorm=self.epsilon).view_as(delta)\n delta = clamp(delta, img_min-data, img_max-data)\n return delta\n\n def loss_function(self, loss):\n \"\"\"\n Get the loss function\n \"\"\"\n if loss == 'crossentropy':\n return nn.CrossEntropyLoss()\n else:\n raise Exception(\"Unsupported loss {}\".format(loss))\n\n def transform(self, data, **kwargs):\n return data\n\n def __call__(self, *input, **kwargs):\n self.model.eval()\n return self.forward(*input, **kwargs)" }, { "identifier": "ghost_resnet101", "path": "transferattack/model_related/ghost_networks/resnet.py", "snippet": "@register_model\n@handle_legacy_interface(weights=(\"pretrained\", ResNet101_Weights.IMAGENET1K_V1))\ndef ghost_resnet101(*, ghost_random_range=0.16, weights: Optional[ResNet101_Weights] = None, progress: bool = True, **kwargs: Any) -> GhostResNet:\n \"\"\"ResNet-101 from `Deep Residual Learning for Image Recognition <https://arxiv.org/pdf/1512.03385.pdf>`__.\n\n .. note::\n The bottleneck of TorchVision places the stride for downsampling to the second 3x3\n convolution while the original paper places it to the first 1x1 convolution.\n This variant improves the accuracy and is known as `ResNet V1.5\n <https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch>`_.\n\n Args:\n weights (:class:`~torchvision.models.ResNet101_Weights`, optional): The\n pretrained weights to use. See\n :class:`~torchvision.models.ResNet101_Weights` below for\n more details, and possible values. By default, no pre-trained\n weights are used.\n progress (bool, optional): If True, displays a progress bar of the\n download to stderr. Default is True.\n **kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``\n base class. 
Please refer to the `source code\n <https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_\n for more details about this class.\n\n .. autoclass:: torchvision.models.ResNet101_Weights\n :members:\n \"\"\"\n weights = ResNet101_Weights.verify(weights)\n\n return _resnet(GhostBottleneck, ghost_random_range, [3, 4, 23, 3], weights, progress, **kwargs)" }, { "identifier": "ghost_resnet152", "path": "transferattack/model_related/ghost_networks/resnet.py", "snippet": "@register_model\n@handle_legacy_interface(weights=(\"pretrained\", ResNet152_Weights.IMAGENET1K_V1))\ndef ghost_resnet152(*, ghost_random_range=0.12, weights: Optional[ResNet152_Weights] = None, progress: bool = True, **kwargs: Any) -> GhostResNet:\n \"\"\"ResNet-152 from `Deep Residual Learning for Image Recognition <https://arxiv.org/pdf/1512.03385.pdf>`__.\n\n .. note::\n The bottleneck of TorchVision places the stride for downsampling to the second 3x3\n convolution while the original paper places it to the first 1x1 convolution.\n This variant improves the accuracy and is known as `ResNet V1.5\n <https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch>`_.\n\n Args:\n weights (:class:`~torchvision.models.ResNet152_Weights`, optional): The\n pretrained weights to use. See\n :class:`~torchvision.models.ResNet152_Weights` below for\n more details, and possible values. By default, no pre-trained\n weights are used.\n progress (bool, optional): If True, displays a progress bar of the\n download to stderr. Default is True.\n **kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``\n base class. Please refer to the `source code\n <https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_\n for more details about this class.\n\n .. autoclass:: torchvision.models.ResNet152_Weights\n :members:\n \"\"\"\n weights = ResNet152_Weights.verify(weights)\n\n return _resnet(GhostBottleneck, ghost_random_range, [3, 8, 36, 3], weights, progress, **kwargs)" }, { "identifier": "MIFGSM", "path": "transferattack/gradient/mifgsm.py", "snippet": "class MIFGSM(Attack):\n \"\"\"\n MI-FGSM Attack\n 'Boosting Adversarial Attacks with Momentum (CVPR 2018)'(https://arxiv.org/abs/1710.06081)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. 
If it is None, the device would be same as model\n\n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1.\n\n Example script:\n python main.py --attack mifgsm --output_dir adv_data/mifgsm/resnet18\n \"\"\"\n\n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., targeted=False, random_start=False,\n norm='linfty', loss='crossentropy', device=None, attack='MI-FGSM', **kwargs):\n super().__init__(attack, model_name, epsilon, targeted, random_start, norm, loss, device)\n self.alpha = alpha\n self.epoch = epoch\n self.decay = decay" }, { "identifier": "NIFGSM", "path": "transferattack/gradient/nifgsm.py", "snippet": "class NIFGSM(MIFGSM):\n \"\"\"\n NI-FGSM Attack\n 'Nesterov Accelerated Gradient and Scale Invariance for Adversarial Attacks (ICLR 2020)'(https://arxiv.org/abs/1908.06281)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. If it is None, the device would be same as model\n\n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1.\n \"\"\"\n\n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., targeted=False, random_start=False,\n norm='linfty', loss='crossentropy', device=None, attack='NI-FGSM', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n\n def transform(self, x, momentum, **kwargs):\n \"\"\"\n look ahead for NI-FGSM\n \"\"\"\n return x + self.alpha*self.decay*momentum" }, { "identifier": "VMIFGSM", "path": "transferattack/gradient/vmifgsm.py", "snippet": "class VMIFGSM(Attack):\n \"\"\"\n VMI-FGSM Attack\n 'Enhancing the transferability of adversarial attacks through variance tuning (CVPR 2021)'(https://arxiv.org/abs/2103.15571)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n beta (float): the relative value for the neighborhood.\n num_neighbor (int): the number of samples for estimating the gradient variance.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. 
If it is None, the device would be same as model\n \n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, beta=1.5, num_neighbor=20, epoch=10, decay=1.\n \"\"\"\n \n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, beta=1.5, num_neighbor=20, epoch=10, decay=1., targeted=False, \n random_start=False, norm='linfty', loss='crossentropy', device=None, attack='VMI-FGSM', **kwargs):\n super().__init__(attack, model_name, epsilon, targeted, random_start, norm, loss, device)\n self.alpha = alpha\n self.radius = beta * epsilon\n self.epoch = epoch\n self.decay = decay\n self.num_neighbor = num_neighbor\n\n def get_variance(self, data, delta, label, cur_grad, momentum, **kwargs):\n \"\"\"\n Calculate the gradient variance \n \"\"\"\n grad = 0\n for _ in range(self.num_neighbor):\n # Obtain the output\n # This is inconsistent for transform!\n logits = self.get_logits(self.transform(data+delta+torch.zeros_like(delta).uniform_(-self.radius, self.radius).to(self.device), momentum=momentum))\n\n # Calculate the loss\n loss = self.get_loss(logits, label)\n\n # Calculate the gradients\n grad += self.get_grad(loss, delta)\n\n return grad / self.num_neighbor - cur_grad\n\n def forward(self, data, label, **kwargs):\n \"\"\"\n The attack procedure for VMI-FGSM\n\n Arguments:\n data: (N, C, H, W) tensor for input images\n labels: (N,) tensor for ground-truth labels if untargetd, otherwise targeted labels\n \"\"\"\n if self.targeted:\n assert len(label) == 2\n label = label[1] # the second element is the targeted label tensor\n data = data.clone().detach().to(self.device)\n label = label.clone().detach().to(self.device)\n\n # Initialize adversarial perturbation\n delta = self.init_delta(data)\n\n momentum, variance = 0, 0\n for _ in range(self.epoch):\n # Obtain the output\n logits = self.get_logits(self.transform(data+delta, momentum=momentum))\n\n # Calculate the loss\n loss = self.get_loss(logits, label)\n\n # Calculate the gradients\n grad = self.get_grad(loss, delta)\n\n # Calculate the momentum\n momentum = self.get_momentum(grad+variance, momentum)\n\n # Calculate the variance\n variance = self.get_variance(data, delta, label, grad, momentum)\n\n # Update adversarial perturbation\n delta = self.update_delta(delta, data, momentum, self.alpha)\n\n return delta.detach()" }, { "identifier": "DIM", "path": "transferattack/input_transformation/dim.py", "snippet": "class DIM(MIFGSM):\n \"\"\"\n DIM Attack\n 'Improving Transferability of Adversarial Examples with Input Diversity (CVPR 2019)'(https://arxiv.org/abs/1803.06978)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n resize_rate (float): the relative size of the resized image\n diversity_prob (float): the probability for transforming the input image\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. 
If it is None, the device would be same as model\n \n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1, resize_rate=1.1, diversity_prob=0.5\n \"\"\"\n \n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., resize_rate=1.1, diversity_prob=0.5, targeted=False, \n random_start=False, norm='linfty', loss='crossentropy', device=None, attack='DIM', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n if resize_rate < 1:\n raise Exception(\"Error! The resize rate should be larger than 1.\")\n self.resize_rate = resize_rate\n self.diversity_prob = diversity_prob\n \n def transform(self, x, **kwargs):\n \"\"\"\n Random transform the input images\n \"\"\"\n # do not transform the input image\n if torch.rand(1) > self.diversity_prob:\n return x\n \n img_size = x.shape[-1]\n img_resize = int(img_size * self.resize_rate)\n\n # resize the input image to random size\n rnd = torch.randint(low=min(img_size, img_resize), high=max(img_size, img_resize), size=(1,), dtype=torch.int32)\n rescaled = F.interpolate(x, size=[rnd, rnd], mode='bilinear', align_corners=False)\n\n # randomly add padding\n h_rem = img_resize - rnd\n w_rem = img_resize - rnd\n pad_top = torch.randint(low=0, high=h_rem.item(), size=(1,), dtype=torch.int32)\n pad_bottom = h_rem - pad_top\n pad_left = torch.randint(low=0, high=w_rem.item(), size=(1,), dtype=torch.int32)\n pad_right = w_rem - pad_left\n\n padded = F.pad(rescaled, [pad_left.item(), pad_right.item(), pad_top.item(), pad_bottom.item()], value=0)\n\n # resize the image back to img_size\n return F.interpolate(padded, size=[img_size, img_size], mode='bilinear', align_corners=False)" }, { "identifier": "TIM", "path": "transferattack/input_transformation/tim.py", "snippet": "class TIM(MIFGSM):\n \"\"\"\n TIM Attack\n 'Evading Defenses to Transferable Adversarial Examples by Translation-Invariant Attacks (CVPR 2019)'(https://arxiv.org/abs/1904.02884)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n kernel_type (str): the type of kernel (gaussian/uniform/linear).\n kernel_size (int): the size of kernel.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. 
If it is None, the device would be same as model\n \n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1., kernel_type='gaussian', kernel_size=15\n\n Example script:\n python main.py --attack tim --output_dir adv_data/tim/resnet18\n \"\"\"\n \n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., kernel_type='gaussian', kernel_size=15, targeted=False, \n random_start=False, norm='linfty', loss='crossentropy', device=None, attack='TIM', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n self.kernel = self.generate_kernel(kernel_type, kernel_size)\n\n def generate_kernel(self, kernel_type, kernel_size, nsig=3):\n \"\"\"\n Generate the gaussian/uniform/linear kernel\n\n Arguments:\n kernel_type (str): the method for initilizing the kernel\n kernel_size (int): the size of kernel\n \"\"\"\n if kernel_type.lower() == 'gaussian':\n x = np.linspace(-nsig, nsig, kernel_size)\n kern1d = st.norm.pdf(x)\n kernel_raw = np.outer(kern1d, kern1d)\n kernel = kernel_raw / kernel_raw.sum()\n elif kernel_type.lower() == 'uniform':\n kernel = np.ones((kernel_size, kernel_size)) / (kernel_size ** 2)\n elif kernel_type.lower() == 'linear':\n kern1d = 1 - np.abs(np.linspace((-kernel_size+1)//2, (kernel_size-1)//2, kernel_size)/(kernel_size**2))\n kernel_raw = np.outer(kern1d, kern1d)\n kernel = kernel_raw / kernel_raw.sum()\n else:\n raise Exception(\"Unspported kernel type {}\".format(kernel_type))\n \n stack_kernel = np.stack([kernel, kernel, kernel])\n stack_kernel = np.expand_dims(stack_kernel, 1)\n return torch.from_numpy(stack_kernel.astype(np.float32)).to(self.device)\n\n def get_grad(self, loss, delta, **kwargs):\n \"\"\"\n Overridden for TIM attack.\n \"\"\"\n grad = torch.autograd.grad(loss, delta, retain_graph=False, create_graph=False)[0]\n grad = F.conv2d(grad, self.kernel, stride=1, padding='same', groups=3)\n return grad" }, { "identifier": "SIM", "path": "transferattack/input_transformation/sim.py", "snippet": "class SIM(MIFGSM):\n \"\"\"\n SIM Attack\n 'Nesterov Accelerated Gradient and Scale Invariance for Adversarial Attacks (ICLR 2020)'(https://arxiv.org/abs/1908.06281)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n num_scale (int): the number of scaled copies in each iteration.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. 
If it is None, the device would be same as model\n\n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1., num_scale=5\n \"\"\"\n\n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., num_scale=5, targeted=False, random_start=False, norm='linfty', loss='crossentropy', device=None, attack='SIM', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n self.num_scale = num_scale\n\n def transform(self, x, **kwargs):\n \"\"\"\n Scale the input for SIM\n \"\"\"\n return torch.cat([x / (2**i) for i in range(self.num_scale)])\n\n def get_loss(self, logits, label):\n \"\"\"\n Calculate the loss\n \"\"\"\n return -self.loss(logits, label.repeat(self.num_scale)) if self.targeted else self.loss(logits, label.repeat(self.num_scale))" }, { "identifier": "Admix", "path": "transferattack/input_transformation/admix.py", "snippet": "class Admix(MIFGSM):\n \"\"\"\n Admix Attack\n 'Admix: Enhancing the Transferability of Adversarial Attacks (ICCV 2021)'(https://arxiv.org/abs/2102.00436)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n num_scale (int): the number of scaled copies in each iteration.\n num_admix (int): the number of admixed images in each iteration.\n admix_strength (float): the strength of admixed images.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. 
If it is None, the device would be same as model\n\n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1., num_scale=5, num_admix=3, admix_strength=0.2\n \"\"\"\n\n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., num_scale=5, num_admix=3, admix_strength=0.2, targeted=False, random_start=False, norm='linfty', loss='crossentropy', device=None, attack='Admix', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n self.num_scale = num_scale\n self.num_admix = num_admix\n self.admix_strength = admix_strength\n\n def transform(self, x, **kwargs):\n \"\"\"\n Admix the input for Admix Attack\n \"\"\"\n admix_images = torch.concat([(x + self.admix_strength * x[torch.randperm(x.size(0))].detach()) for _ in range(self.num_admix)], dim=0)\n return torch.concat([admix_images / (2 ** i) for i in range(self.num_scale)])\n\n def get_loss(self, logits, label):\n \"\"\"\n Calculate the loss\n \"\"\"\n return -self.loss(logits, label.repeat(self.num_scale*self.num_admix)) if self.targeted else self.loss(logits, label.repeat(self.num_scale*self.num_admix))" }, { "identifier": "MIFGSM", "path": "transferattack/gradient/mifgsm.py", "snippet": "class MIFGSM(Attack):\n \"\"\"\n MI-FGSM Attack\n 'Boosting Adversarial Attacks with Momentum (CVPR 2018)'(https://arxiv.org/abs/1710.06081)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. If it is None, the device would be same as model\n\n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1.\n\n Example script:\n python main.py --attack mifgsm --output_dir adv_data/mifgsm/resnet18\n \"\"\"\n\n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., targeted=False, random_start=False,\n norm='linfty', loss='crossentropy', device=None, attack='MI-FGSM', **kwargs):\n super().__init__(attack, model_name, epsilon, targeted, random_start, norm, loss, device)\n self.alpha = alpha\n self.epoch = epoch\n self.decay = decay" }, { "identifier": "NIFGSM", "path": "transferattack/gradient/nifgsm.py", "snippet": "class NIFGSM(MIFGSM):\n \"\"\"\n NI-FGSM Attack\n 'Nesterov Accelerated Gradient and Scale Invariance for Adversarial Attacks (ICLR 2020)'(https://arxiv.org/abs/1908.06281)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. 
If it is None, the device would be same as model\n\n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1.\n \"\"\"\n\n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., targeted=False, random_start=False,\n norm='linfty', loss='crossentropy', device=None, attack='NI-FGSM', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n\n def transform(self, x, momentum, **kwargs):\n \"\"\"\n look ahead for NI-FGSM\n \"\"\"\n return x + self.alpha*self.decay*momentum" }, { "identifier": "DIM", "path": "transferattack/input_transformation/dim.py", "snippet": "class DIM(MIFGSM):\n \"\"\"\n DIM Attack\n 'Improving Transferability of Adversarial Examples with Input Diversity (CVPR 2019)'(https://arxiv.org/abs/1803.06978)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n resize_rate (float): the relative size of the resized image\n diversity_prob (float): the probability for transforming the input image\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. If it is None, the device would be same as model\n \n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1, resize_rate=1.1, diversity_prob=0.5\n \"\"\"\n \n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., resize_rate=1.1, diversity_prob=0.5, targeted=False, \n random_start=False, norm='linfty', loss='crossentropy', device=None, attack='DIM', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n if resize_rate < 1:\n raise Exception(\"Error! 
The resize rate should be larger than 1.\")\n self.resize_rate = resize_rate\n self.diversity_prob = diversity_prob\n \n def transform(self, x, **kwargs):\n \"\"\"\n Random transform the input images\n \"\"\"\n # do not transform the input image\n if torch.rand(1) > self.diversity_prob:\n return x\n \n img_size = x.shape[-1]\n img_resize = int(img_size * self.resize_rate)\n\n # resize the input image to random size\n rnd = torch.randint(low=min(img_size, img_resize), high=max(img_size, img_resize), size=(1,), dtype=torch.int32)\n rescaled = F.interpolate(x, size=[rnd, rnd], mode='bilinear', align_corners=False)\n\n # randomly add padding\n h_rem = img_resize - rnd\n w_rem = img_resize - rnd\n pad_top = torch.randint(low=0, high=h_rem.item(), size=(1,), dtype=torch.int32)\n pad_bottom = h_rem - pad_top\n pad_left = torch.randint(low=0, high=w_rem.item(), size=(1,), dtype=torch.int32)\n pad_right = w_rem - pad_left\n\n padded = F.pad(rescaled, [pad_left.item(), pad_right.item(), pad_top.item(), pad_bottom.item()], value=0)\n\n # resize the image back to img_size\n return F.interpolate(padded, size=[img_size, img_size], mode='bilinear', align_corners=False)" }, { "identifier": "TIM", "path": "transferattack/input_transformation/tim.py", "snippet": "class TIM(MIFGSM):\n \"\"\"\n TIM Attack\n 'Evading Defenses to Transferable Adversarial Examples by Translation-Invariant Attacks (CVPR 2019)'(https://arxiv.org/abs/1904.02884)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n kernel_type (str): the type of kernel (gaussian/uniform/linear).\n kernel_size (int): the size of kernel.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. 
If it is None, the device would be same as model\n \n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1., kernel_type='gaussian', kernel_size=15\n\n Example script:\n python main.py --attack tim --output_dir adv_data/tim/resnet18\n \"\"\"\n \n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., kernel_type='gaussian', kernel_size=15, targeted=False, \n random_start=False, norm='linfty', loss='crossentropy', device=None, attack='TIM', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n self.kernel = self.generate_kernel(kernel_type, kernel_size)\n\n def generate_kernel(self, kernel_type, kernel_size, nsig=3):\n \"\"\"\n Generate the gaussian/uniform/linear kernel\n\n Arguments:\n kernel_type (str): the method for initilizing the kernel\n kernel_size (int): the size of kernel\n \"\"\"\n if kernel_type.lower() == 'gaussian':\n x = np.linspace(-nsig, nsig, kernel_size)\n kern1d = st.norm.pdf(x)\n kernel_raw = np.outer(kern1d, kern1d)\n kernel = kernel_raw / kernel_raw.sum()\n elif kernel_type.lower() == 'uniform':\n kernel = np.ones((kernel_size, kernel_size)) / (kernel_size ** 2)\n elif kernel_type.lower() == 'linear':\n kern1d = 1 - np.abs(np.linspace((-kernel_size+1)//2, (kernel_size-1)//2, kernel_size)/(kernel_size**2))\n kernel_raw = np.outer(kern1d, kern1d)\n kernel = kernel_raw / kernel_raw.sum()\n else:\n raise Exception(\"Unspported kernel type {}\".format(kernel_type))\n \n stack_kernel = np.stack([kernel, kernel, kernel])\n stack_kernel = np.expand_dims(stack_kernel, 1)\n return torch.from_numpy(stack_kernel.astype(np.float32)).to(self.device)\n\n def get_grad(self, loss, delta, **kwargs):\n \"\"\"\n Overridden for TIM attack.\n \"\"\"\n grad = torch.autograd.grad(loss, delta, retain_graph=False, create_graph=False)[0]\n grad = F.conv2d(grad, self.kernel, stride=1, padding='same', groups=3)\n return grad" }, { "identifier": "SIM", "path": "transferattack/input_transformation/sim.py", "snippet": "class SIM(MIFGSM):\n \"\"\"\n SIM Attack\n 'Nesterov Accelerated Gradient and Scale Invariance for Adversarial Attacks (ICLR 2020)'(https://arxiv.org/abs/1908.06281)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n num_scale (int): the number of scaled copies in each iteration.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. 
If it is None, the device would be same as model\n\n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1., num_scale=5\n \"\"\"\n\n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., num_scale=5, targeted=False, random_start=False, norm='linfty', loss='crossentropy', device=None, attack='SIM', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n self.num_scale = num_scale\n\n def transform(self, x, **kwargs):\n \"\"\"\n Scale the input for SIM\n \"\"\"\n return torch.cat([x / (2**i) for i in range(self.num_scale)])\n\n def get_loss(self, logits, label):\n \"\"\"\n Calculate the loss\n \"\"\"\n return -self.loss(logits, label.repeat(self.num_scale)) if self.targeted else self.loss(logits, label.repeat(self.num_scale))" }, { "identifier": "Admix", "path": "transferattack/input_transformation/admix.py", "snippet": "class Admix(MIFGSM):\n \"\"\"\n Admix Attack\n 'Admix: Enhancing the Transferability of Adversarial Attacks (ICCV 2021)'(https://arxiv.org/abs/2102.00436)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n num_scale (int): the number of scaled copies in each iteration.\n num_admix (int): the number of admixed images in each iteration.\n admix_strength (float): the strength of admixed images.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. If it is None, the device would be same as model\n\n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1., num_scale=5, num_admix=3, admix_strength=0.2\n \"\"\"\n\n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., num_scale=5, num_admix=3, admix_strength=0.2, targeted=False, random_start=False, norm='linfty', loss='crossentropy', device=None, attack='Admix', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n self.num_scale = num_scale\n self.num_admix = num_admix\n self.admix_strength = admix_strength\n\n def transform(self, x, **kwargs):\n \"\"\"\n Admix the input for Admix Attack\n \"\"\"\n admix_images = torch.concat([(x + self.admix_strength * x[torch.randperm(x.size(0))].detach()) for _ in range(self.num_admix)], dim=0)\n return torch.concat([admix_images / (2 ** i) for i in range(self.num_scale)])\n\n def get_loss(self, logits, label):\n \"\"\"\n Calculate the loss\n \"\"\"\n return -self.loss(logits, label.repeat(self.num_scale*self.num_admix)) if self.targeted else self.loss(logits, label.repeat(self.num_scale*self.num_admix))" } ]
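The TIM snippets in the context above smooth the input gradient with a channel-wise Gaussian kernel before the momentum update. Below is a minimal standalone sketch of that kernel construction and the depthwise convolution; the kernel size, batch shape, and the dummy gradient are illustrative choices, not values taken from the repository.

import numpy as np
import scipy.stats as st
import torch
import torch.nn.functional as F

def gaussian_kernel(kernel_size=15, nsig=3):
    # sample a 1D Gaussian on [-nsig, nsig] and take the outer product for a 2D kernel
    x = np.linspace(-nsig, nsig, kernel_size)
    kern1d = st.norm.pdf(x)
    kern2d = np.outer(kern1d, kern1d)
    kern2d = kern2d / kern2d.sum()
    # replicate for 3 channels: weight shape (3, 1, k, k) for a depthwise conv (groups=3)
    stacked = np.stack([kern2d, kern2d, kern2d])[:, None]
    return torch.from_numpy(stacked.astype(np.float32))

kernel = gaussian_kernel()
grad = torch.randn(4, 3, 224, 224)  # stand-in for the gradient w.r.t. the perturbation
smoothed = F.conv2d(grad, kernel, stride=1, padding='same', groups=3)
print(smoothed.shape)  # torch.Size([4, 3, 224, 224]); spatial size is preserved

Smoothing the gradient this way approximates averaging gradients over translated copies of the input, which is the source of TIM's translation invariance.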
from ..utils import * from ..attack import Attack from .ghost_networks.resnet import ghost_resnet101, ghost_resnet152 from ..gradient.mifgsm import MIFGSM from ..gradient.nifgsm import NIFGSM from ..gradient.vmifgsm import VMIFGSM from ..input_transformation.dim import DIM from ..input_transformation.tim import TIM from ..input_transformation.sim import SIM from ..input_transformation.admix import Admix from torch import Tensor from ..utils import * from ..gradient.mifgsm import MIFGSM from ..gradient.nifgsm import NIFGSM from ..input_transformation.dim import DIM from ..input_transformation.tim import TIM from ..input_transformation.sim import SIM from ..input_transformation.admix import Admix
11,663
# example bash: python main.py --attack=ghost_network support_models = { "resnet101": ghost_resnet101, "resnet152": ghost_resnet152, } class GhostNetwork_MIFGSM(MIFGSM): """ Ghost Network Attack: Arguments: model (str): the surrogate model for attack. ghost_keep_prob (float): the dropout rate when generating ghost networks. ghost_random_range (float): the dropout rate when generating ghost networks of residual structure. """ def __init__(self, model_name='inc_v3', ghost_keep_prob=0.994, ghost_random_range=0.16, *args, **kwargs): self.ghost_keep_prob = ghost_keep_prob # do not use self.ghost_random_range = ghost_random_range # do not use super().__init__(model_name, *args, **kwargs) def load_model(self, model_name): if model_name in support_models.keys(): # The ghost_keep_prob and ghost_random_range are correctly set as param default value, # in the __init__ function of each GhostNetwork. model = wrap_model(support_models[model_name](weights='DEFAULT').eval().cuda()) else: raise ValueError('Model {} not supported for GhostNetwork'.format(model_name)) return model class GhostNetwork_IFGSM(MIFGSM): """ Ghost Network Attack: Arguments: model (str): the surrogate model for attack. ghost_keep_prob (float): the dropout rate when generating ghost networks. ghost_random_range (float): the dropout rate when generating ghost networks of residual structure. """ def __init__(self, model_name='inc_v3', ghost_keep_prob=0.994, ghost_random_range=0.16, *args, **kwargs): self.ghost_keep_prob = ghost_keep_prob # do not use self.ghost_random_range = ghost_random_range # do not use super().__init__(model_name, *args, **kwargs) self.decay = 0. def load_model(self, model_name): if model_name in support_models.keys(): # The ghost_keep_prob and ghost_random_range are correctly set as param default value, # in the __init__ function of each GhostNetwork. model = wrap_model(support_models[model_name](weights='DEFAULT').eval().cuda()) else: raise ValueError('Model {} not supported for GhostNetwork'.format(model_name)) return model class GhostNetwork_NIFGSM(NIFGSM): """ Ghost Network Attack: Arguments: model (str): the surrogate model for attack. ghost_keep_prob (float): the dropout rate when generating ghost networks. ghost_random_range (float): the dropout rate when generating ghost networks of residual structure. """ def __init__(self, model_name='inc_v3', ghost_keep_prob=0.994, ghost_random_range=0.16, *args, **kwargs): self.ghost_keep_prob = ghost_keep_prob # do not use self.ghost_random_range = ghost_random_range # do not use super().__init__(model_name, *args, **kwargs) def load_model(self, model_name): if model_name in support_models.keys(): # The ghost_keep_prob and ghost_random_range are correctly set as param default value, # in the __init__ function of each GhostNetwork. model = wrap_model(support_models[model_name](weights='DEFAULT').eval().cuda()) else: raise ValueError('Model {} not supported for GhostNetwork'.format(model_name)) return model class GhostNetwork_VMIFGSM(VMIFGSM): """ Ghost Network Attack: Arguments: model (str): the surrogate model for attack. ghost_keep_prob (float): the dropout rate when generating ghost networks. ghost_random_range (float): the dropout rate when generating ghost networks of residual structure. 
""" def __init__(self, model='inc_v3', ghost_keep_prob=0.994, ghost_random_range=0.16, *args, **kwargs): self.ghost_keep_prob = ghost_keep_prob # do not use self.ghost_random_range = ghost_random_range # do not use super().__init__(model, *args, **kwargs) def load_model(self, model_name): if model_name in support_models.keys(): # The ghost_keep_prob and ghost_random_range are correctly set as param default value, # in the __init__ function of each GhostNetwork. model = wrap_model(support_models[model_name](weights='DEFAULT').eval().cuda()) else: raise ValueError('Model {} not supported for GhostNetwork'.format(model_name)) return model
# example bash: python main.py --attack=ghost_network support_models = { "resnet101": ghost_resnet101, "resnet152": ghost_resnet152, } class GhostNetwork_MIFGSM(MIFGSM): """ Ghost Network Attack: Arguments: model (str): the surrogate model for attack. ghost_keep_prob (float): the dropout rate when generating ghost networks. ghost_random_range (float): the dropout rate when generating ghost networks of residual structure. """ def __init__(self, model_name='inc_v3', ghost_keep_prob=0.994, ghost_random_range=0.16, *args, **kwargs): self.ghost_keep_prob = ghost_keep_prob # do not use self.ghost_random_range = ghost_random_range # do not use super().__init__(model_name, *args, **kwargs) def load_model(self, model_name): if model_name in support_models.keys(): # The ghost_keep_prob and ghost_random_range are correctly set as param default value, # in the __init__ function of each GhostNetwork. model = wrap_model(support_models[model_name](weights='DEFAULT').eval().cuda()) else: raise ValueError('Model {} not supported for GhostNetwork'.format(model_name)) return model class GhostNetwork_IFGSM(MIFGSM): """ Ghost Network Attack: Arguments: model (str): the surrogate model for attack. ghost_keep_prob (float): the dropout rate when generating ghost networks. ghost_random_range (float): the dropout rate when generating ghost networks of residual structure. """ def __init__(self, model_name='inc_v3', ghost_keep_prob=0.994, ghost_random_range=0.16, *args, **kwargs): self.ghost_keep_prob = ghost_keep_prob # do not use self.ghost_random_range = ghost_random_range # do not use super().__init__(model_name, *args, **kwargs) self.decay = 0. def load_model(self, model_name): if model_name in support_models.keys(): # The ghost_keep_prob and ghost_random_range are correctly set as param default value, # in the __init__ function of each GhostNetwork. model = wrap_model(support_models[model_name](weights='DEFAULT').eval().cuda()) else: raise ValueError('Model {} not supported for GhostNetwork'.format(model_name)) return model class GhostNetwork_NIFGSM(NIFGSM): """ Ghost Network Attack: Arguments: model (str): the surrogate model for attack. ghost_keep_prob (float): the dropout rate when generating ghost networks. ghost_random_range (float): the dropout rate when generating ghost networks of residual structure. """ def __init__(self, model_name='inc_v3', ghost_keep_prob=0.994, ghost_random_range=0.16, *args, **kwargs): self.ghost_keep_prob = ghost_keep_prob # do not use self.ghost_random_range = ghost_random_range # do not use super().__init__(model_name, *args, **kwargs) def load_model(self, model_name): if model_name in support_models.keys(): # The ghost_keep_prob and ghost_random_range are correctly set as param default value, # in the __init__ function of each GhostNetwork. model = wrap_model(support_models[model_name](weights='DEFAULT').eval().cuda()) else: raise ValueError('Model {} not supported for GhostNetwork'.format(model_name)) return model class GhostNetwork_VMIFGSM(VMIFGSM): """ Ghost Network Attack: Arguments: model (str): the surrogate model for attack. ghost_keep_prob (float): the dropout rate when generating ghost networks. ghost_random_range (float): the dropout rate when generating ghost networks of residual structure. 
""" def __init__(self, model='inc_v3', ghost_keep_prob=0.994, ghost_random_range=0.16, *args, **kwargs): self.ghost_keep_prob = ghost_keep_prob # do not use self.ghost_random_range = ghost_random_range # do not use super().__init__(model, *args, **kwargs) def load_model(self, model_name): if model_name in support_models.keys(): # The ghost_keep_prob and ghost_random_range are correctly set as param default value, # in the __init__ function of each GhostNetwork. model = wrap_model(support_models[model_name](weights='DEFAULT').eval().cuda()) else: raise ValueError('Model {} not supported for GhostNetwork'.format(model_name)) return model
class GhostNetwork_DIM(DIM):
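Every GhostNetwork_* class shown in the code above repeats the same __init__ and load_model boilerplate and only changes the parent attack, so the class named on this next line plausibly continues the same way. The sketch below is an illustrative guess based on that visible pattern, not the repository's actual implementation.

class GhostNetwork_DIM(DIM):  # hypothetical sketch following the pattern of the classes above
    def __init__(self, model_name='inc_v3', ghost_keep_prob=0.994, ghost_random_range=0.16, *args, **kwargs):
        self.ghost_keep_prob = ghost_keep_prob  # do not use
        self.ghost_random_range = ghost_random_range  # do not use
        super().__init__(model_name, *args, **kwargs)

    def load_model(self, model_name):
        if model_name in support_models.keys():
            # ghost_keep_prob and ghost_random_range are taken from the ghost model's own defaults
            model = wrap_model(support_models[model_name](weights='DEFAULT').eval().cuda())
        else:
            raise ValueError('Model {} not supported for GhostNetwork'.format(model_name))
        return model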
12
2023-10-31 03:43:26+00:00
16k
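The SIM and Admix entries in the record above both enlarge the batch with scaled (or admixed) copies and repeat the label tensor by the same factor inside the loss. A self-contained sketch of that scale-invariant pattern follows; the toy classifier and tensor shapes are chosen only for illustration and are not part of the repository.

import torch
import torch.nn as nn

num_scale = 5
model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10))  # toy classifier, not the repo's wrapped model
loss_fn = nn.CrossEntropyLoss()

x = torch.rand(2, 3, 32, 32)                      # two clean images
label = torch.tensor([3, 7])
delta = torch.zeros_like(x, requires_grad=True)   # adversarial perturbation

# SIM-style transform: stack x, x/2, x/4, ... along the batch dimension
x_scaled = torch.cat([(x + delta) / (2 ** i) for i in range(num_scale)])
logits = model(x_scaled)                          # (2 * num_scale, 10)
loss = loss_fn(logits, label.repeat(num_scale))   # labels repeated to match the enlarged batch
grad = torch.autograd.grad(loss, delta)[0]
print(grad.shape)                                 # torch.Size([2, 3, 32, 32])

Averaging the loss over the scaled copies yields a gradient that is less tied to one particular input scale of the surrogate model, which is what these attacks exploit for transferability.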
chenruduan/OAReactDiff
demo.py
[ { "identifier": "LEFTNet", "path": "oa_reactdiff/model/leftnet.py", "snippet": "class LEFTNet(torch.nn.Module):\n r\"\"\"\n LEFTNet\n\n Args:\n pos_require_grad (bool, optional): If set to :obj:`True`, will require to take derivative of model output with respect to the atomic positions. (default: :obj:`False`)\n cutoff (float, optional): Cutoff distance for interatomic interactions. (default: :obj:`5.0`)\n num_layers (int, optional): Number of building blocks. (default: :obj:`4`)\n hidden_channels (int, optional): Hidden embedding size. (default: :obj:`128`)\n num_radial (int, optional): Number of radial basis functions. (default: :obj:`96`)\n y_mean (float, optional): Mean value of the labels of training data. (default: :obj:`0`)\n y_std (float, optional): Standard deviation of the labels of training data. (default: :obj:`1`)\n\n \"\"\"\n\n def __init__(\n self,\n pos_require_grad=False,\n cutoff=10.0,\n num_layers=4,\n hidden_channels=128,\n num_radial=96,\n in_hidden_channels: int = 8,\n reflect_equiv: bool = True,\n legacy: bool = True,\n update: bool = True,\n pos_grad: bool = False,\n single_layer_output: bool = True,\n for_conf: bool = False,\n ff: bool = False,\n object_aware: bool = True,\n **kwargs,\n ):\n super(LEFTNet, self).__init__()\n self.num_layers = num_layers\n self.hidden_channels = hidden_channels\n self.cutoff = cutoff\n self.pos_require_grad = pos_require_grad\n self.reflect_equiv = reflect_equiv\n self.legacy = legacy\n self.update = update\n self.pos_grad = pos_grad\n self.for_conf = for_conf\n self.ff = ff\n self.object_aware = object_aware\n\n self.embedding = nn.Linear(in_hidden_channels, hidden_channels)\n self.embedding_out = nn.Linear(hidden_channels, in_hidden_channels)\n self.radial_emb = RBFEmb(num_radial, self.cutoff)\n self.neighbor_emb = NeighborEmb(hidden_channels, in_hidden_channels)\n self.s2v = CFConvS2V(hidden_channels)\n\n self.radial_lin = nn.Sequential(\n nn.Linear(num_radial, hidden_channels),\n nn.SiLU(inplace=True),\n nn.Linear(hidden_channels, hidden_channels),\n )\n\n self.lin3 = nn.Sequential(\n nn.Linear(3, hidden_channels // 4),\n nn.SiLU(inplace=True),\n nn.Linear(hidden_channels // 4, 1),\n )\n self.pos_expansion = MLP(\n in_dim=3,\n out_dims=[hidden_channels // 2, hidden_channels],\n activation=\"swish\",\n last_layer_no_activation=True,\n bias=False,\n )\n if self.legacy:\n self.distance_embedding = MLP(\n in_dim=num_radial,\n out_dims=[hidden_channels // 2, hidden_channels],\n activation=\"swish\",\n bias=False,\n )\n if self.pos_grad:\n self.dynamic_mlp_modules = nn.Sequential(\n nn.Linear(hidden_channels, hidden_channels // 2),\n nn.SiLU(inplace=True),\n nn.Linear(hidden_channels // 2, 3),\n )\n\n self.gcl_layers = nn.ModuleList()\n self.message_layers = nn.ModuleList()\n self.update_layers = nn.ModuleList()\n\n for _ in range(num_layers):\n self.gcl_layers.append(\n GCLMessage(hidden_channels, num_radial, legacy=legacy)\n )\n self.message_layers.append(\n EquiMessage(hidden_channels, num_radial, reflect_equiv).jittable()\n )\n self.update_layers.append(EquiUpdate(hidden_channels, reflect_equiv))\n\n self.last_layer = nn.Linear(hidden_channels, 1)\n\n self.inv_sqrt_2 = 1 / math.sqrt(2.0)\n self.out_pos = EquiOutput(\n hidden_channels,\n out_channels=1,\n single_layer_output=single_layer_output,\n )\n\n # for node-wise frame\n self.vec = vector()\n\n self.reset_parameters()\n\n def reset_parameters(self):\n self.radial_emb.reset_parameters()\n\n def scalarization(self, pos, edge_index):\n i, j = edge_index\n dist = (pos[i] - 
pos[j]).pow(2).sum(dim=-1).sqrt()\n coord_diff = pos[i] - pos[j]\n radial = torch.sum((coord_diff) ** 2, 1).unsqueeze(1)\n coord_cross = torch.cross(pos[i], pos[j])\n norm = torch.sqrt(radial) + EPS\n coord_diff = coord_diff / norm\n cross_norm = (torch.sqrt(torch.sum((coord_cross) ** 2, 1).unsqueeze(1))) + EPS\n coord_cross = coord_cross / cross_norm\n coord_vertical = torch.cross(coord_diff, coord_cross)\n\n return dist, coord_diff, coord_cross, coord_vertical\n\n @staticmethod\n def assemble_nodemask(edge_index: Tensor, pos: Tensor):\n node_mask = torch.zeros(pos.size(0), device=pos.device)\n node_mask[:] = -1\n _i, _j = edge_index\n _ind = 0\n for center in range(pos.size(0)):\n if node_mask[center] > -1:\n continue\n _connected = _j[torch.where(_i == center)]\n _connected = torch.concat(\n [_connected, torch.tensor([center], device=pos.device)]\n )\n node_mask[_connected] = _ind\n _ind += 1\n return node_mask\n\n def forward(\n self,\n h: Tensor,\n pos: Tensor,\n edge_index: Tensor,\n edge_attr: Optional[Tensor] = None,\n node_mask: Optional[Tensor] = None,\n edge_mask: Optional[Tensor] = None,\n update_coords_mask: Optional[Tensor] = None,\n subgraph_mask: Optional[Tensor] = None,\n ):\n # if self.pos_require_grad:\n # pos.requires_grad_()\n\n if not self.object_aware:\n subgraph_mask = None\n\n i, j = edge_index\n\n # embed z, assuming last column is atom number\n z_emb = self.embedding(h)\n\n i, j = edge_index\n dist = (pos[i] - pos[j]).pow(2).sum(dim=-1).sqrt()\n inner_subgraph_mask = torch.zeros(edge_index.size(1), 1, device=dist.device)\n inner_subgraph_mask[torch.where(dist < self.cutoff)[0]] = 1\n\n all_edge_masks = inner_subgraph_mask\n if subgraph_mask is not None:\n all_edge_masks = all_edge_masks * subgraph_mask\n\n edge_index_w_cutoff = edge_index.T[torch.where(all_edge_masks > 0)[0]].T\n node_mask_w_cutoff = self.assemble_nodemask(\n edge_index=edge_index_w_cutoff, pos=pos\n )\n\n pos_frame = pos.clone()\n pos_frame = remove_mean_batch(pos_frame, node_mask_w_cutoff.long())\n\n # bulid edge-wise frame and scalarization vector features for edge update\n dist, coord_diff, coord_cross, coord_vertical = self.scalarization(\n pos_frame, edge_index\n )\n\n dist = dist * all_edge_masks.squeeze(-1)\n coord_diff = coord_diff * all_edge_masks\n coord_cross = coord_cross * all_edge_masks\n coord_vertical = coord_vertical * all_edge_masks\n\n frame = torch.cat(\n (\n coord_diff.unsqueeze(-1),\n coord_cross.unsqueeze(-1),\n coord_vertical.unsqueeze(-1),\n ),\n dim=-1,\n )\n radial_emb = self.radial_emb(dist)\n radial_emb = radial_emb * all_edge_masks\n\n f = self.radial_lin(radial_emb)\n rbounds = 0.5 * (torch.cos(dist * pi / self.cutoff) + 1.0)\n f = rbounds.unsqueeze(-1) * f\n\n # init node features\n s = self.neighbor_emb(h, z_emb, edge_index, f)\n\n NE1 = self.s2v(s, coord_diff.unsqueeze(-1), edge_index, f)\n scalrization1 = torch.sum(NE1[i].unsqueeze(2) * frame.unsqueeze(-1), dim=1)\n scalrization2 = torch.sum(NE1[j].unsqueeze(2) * frame.unsqueeze(-1), dim=1)\n if self.reflect_equiv:\n scalrization1[:, 1, :] = torch.abs(scalrization1[:, 1, :].clone())\n scalrization2[:, 1, :] = torch.abs(scalrization2[:, 1, :].clone())\n\n scalar3 = (\n self.lin3(torch.permute(scalrization1, (0, 2, 1)))\n + torch.permute(scalrization1, (0, 2, 1))[:, :, 0].unsqueeze(2)\n ).squeeze(-1)\n scalar4 = (\n self.lin3(torch.permute(scalrization2, (0, 2, 1)))\n + torch.permute(scalrization2, (0, 2, 1))[:, :, 0].unsqueeze(2)\n ).squeeze(-1)\n edgeweight = torch.cat((scalar3, scalar4), dim=-1) * 
rbounds.unsqueeze(-1)\n edgeweight = torch.cat((edgeweight, f), dim=-1)\n # add distance embedding\n edgeweight = torch.cat((edgeweight, radial_emb), dim=-1)\n\n # bulid node-wise frame for node-update\n a = pos_frame\n if self.legacy:\n b = self.vec(pos_frame, edge_index)\n else:\n # Added by Chenru: for new implementation of constructing node frame.\n eff_edge_ij = torch.where(all_edge_masks.squeeze(-1) == 1)[0]\n eff_edge_index = edge_index[:, eff_edge_ij]\n eff_dist = dist[eff_edge_ij]\n b = nn_vector(eff_dist, eff_edge_index, pos_frame)\n # assert_rot_equiv(nn_vector, dist_pad, edge_index, pos) # for debugging\n\n x1 = (a - b) / ((torch.sqrt(torch.sum((a - b) ** 2, 1).unsqueeze(1))) + EPS)\n y1 = torch.cross(a, b)\n normy = (torch.sqrt(torch.sum(y1**2, 1).unsqueeze(1))) + EPS\n y1 = y1 / normy\n # assert torch.trace(torch.matmul(x1, torch.transpose(y1, 0, 1))) < EPS # for debugging\n\n z1 = torch.cross(x1, y1)\n nodeframe = torch.cat(\n (x1.unsqueeze(-1), y1.unsqueeze(-1), z1.unsqueeze(-1)), dim=-1\n )\n\n pos_prjt = torch.sum(pos_frame.unsqueeze(-1) * nodeframe, dim=1)\n\n vec = torch.zeros(s.size(0), 3, s.size(1), device=s.device)\n gradient = torch.zeros(s.size(0), 3, device=s.device)\n for i in range(self.num_layers):\n # Added by Chenru: for letting multiple objects message passing.\n if self.legacy or i == 0:\n s = s + self.pos_expansion(pos_prjt)\n s, edgeweight = self.gcl_layers[i](\n s,\n edge_index,\n edgeweight,\n )\n\n dx, dvec = self.message_layers[i](\n s,\n vec,\n edge_index,\n radial_emb,\n edgeweight,\n coord_diff,\n coord_cross,\n )\n s = s + dx\n vec = vec + dvec\n s = s * self.inv_sqrt_2\n\n if self.update:\n dx, dvec = self.update_layers[i](s, vec, nodeframe)\n s = s + dx\n vec = vec + dvec\n\n if self.pos_grad:\n dynamic_coff = self.dynamic_mlp_modules(s) # (node, 3)\n basis_mix = (\n dynamic_coff[:, :1] * x1\n + dynamic_coff[:, 1:2] * y1\n + dynamic_coff[:, 2:3] * z1\n )\n gradient = gradient + basis_mix / self.num_layers\n\n if self.for_conf:\n return s\n\n _, dpos = self.out_pos(s, vec)\n\n if update_coords_mask is not None:\n dpos = update_coords_mask * dpos\n pos = pos + dpos + gradient\n\n if self.ff:\n return s, dpos\n\n h = self.embedding_out(s)\n if node_mask is not None:\n h = h * node_mask\n edge_attr = None\n return h, pos, edge_attr" }, { "identifier": "generate_full_eij", "path": "oa_reactdiff/tests/model/utils.py", "snippet": "def generate_full_eij(n_atom: int):\n r\"\"\"Get fully-connected graphs for n_atoms.\"\"\"\n edge_index = []\n for ii in range(n_atom):\n for jj in range(n_atom):\n if ii != jj:\n edge_index.append([ii, jj])\n return torch.transpose(torch.Tensor(edge_index), 1, 0).long()" }, { "identifier": "get_cut_graph_mask", "path": "oa_reactdiff/tests/model/utils.py", "snippet": "def get_cut_graph_mask(edge_index, n_cut):\n r\"\"\"Get mask for a graph cut at n_cut, with ij representing cross-subgraph edgs being 0.\"\"\"\n ind_sum = torch.where(edge_index < n_cut, 1, 0).sum(dim=0)\n subgraph_mask = torch.zeros(edge_index.size(1)).long()\n subgraph_mask[ind_sum == 2] = 1\n subgraph_mask[ind_sum == 0] = 1\n subgraph_mask = subgraph_mask[:, None]\n return subgraph_mask" }, { "identifier": "DDPMModule", "path": "oa_reactdiff/trainer/pl_trainer.py", "snippet": "class DDPMModule(LightningModule):\n def __init__(\n self,\n model_config: Dict,\n optimizer_config: Dict,\n training_config: Dict,\n node_nfs: List[int] = [9] * 3,\n edge_nf: int = 4,\n condition_nf: int = 3,\n fragment_names: List[str] = [\"inorg_node\", \"org_edge\", \"org_node\"],\n 
pos_dim: int = 3,\n update_pocket_coords: bool = True,\n condition_time: bool = True,\n edge_cutoff: Optional[float] = None,\n norm_values: Tuple = (1.0, 1.0, 1.0),\n norm_biases: Tuple = (0.0, 0.0, 0.0),\n noise_schedule: str = \"polynomial_2\",\n timesteps: int = 1000,\n precision: float = 1e-5,\n loss_type: str = \"l2\",\n pos_only: bool = False,\n process_type: Optional[str] = None,\n model: nn.Module = None,\n enforce_same_encoding: Optional[List] = None,\n scales: List[float] = [1.0, 1.0, 1.0],\n eval_epochs: int = 20,\n source: Optional[Dict] = None,\n fixed_idx: Optional[List] = None,\n ) -> None:\n super().__init__()\n egnn_dynamics = EGNNDynamics(\n model_config=model_config,\n node_nfs=node_nfs,\n edge_nf=edge_nf,\n condition_nf=condition_nf,\n fragment_names=fragment_names,\n pos_dim=pos_dim,\n update_pocket_coords=update_pocket_coords,\n condition_time=condition_time,\n edge_cutoff=edge_cutoff,\n model=model,\n enforce_same_encoding=enforce_same_encoding,\n source=source,\n )\n\n normalizer = Normalizer(\n norm_values=norm_values,\n norm_biases=norm_biases,\n pos_dim=pos_dim,\n )\n\n gamma_module = PredefinedNoiseSchedule(\n noise_schedule=noise_schedule,\n timesteps=timesteps,\n precision=precision,\n )\n schedule = DiffSchedule(gamma_module=gamma_module, norm_values=norm_values)\n\n self.ddpm = EnVariationalDiffusion(\n dynamics=egnn_dynamics,\n schdule=schedule,\n normalizer=normalizer,\n size_histogram=None,\n loss_type=loss_type,\n pos_only=pos_only,\n fixed_idx=fixed_idx,\n )\n self.model_config = model_config\n self.optimizer_config = optimizer_config\n self.training_config = training_config\n self.loss_type = loss_type\n self.n_fragments = len(fragment_names)\n self.remove_h = training_config[\"remove_h\"]\n self.pos_only = pos_only\n self.process_type = process_type or \"QM9\"\n self.scales = scales\n\n sampling_gamma_module = PredefinedNoiseSchedule(\n noise_schedule=\"polynomial_2\",\n timesteps=150,\n precision=precision,\n )\n self.sampling_schedule = DiffSchedule(\n gamma_module=sampling_gamma_module,\n norm_values=norm_values,\n )\n self.eval_epochs = eval_epochs\n\n self.clip_grad = training_config[\"clip_grad\"]\n if self.clip_grad:\n self.gradnorm_queue = utils.Queue()\n self.gradnorm_queue.add(3000)\n self.save_hyperparameters()\n\n def configure_optimizers(self):\n optimizer = torch.optim.AdamW(self.ddpm.parameters(), **self.optimizer_config)\n if not self.training_config[\"lr_schedule_type\"] is None:\n scheduler_func = LR_SCHEDULER[self.training_config[\"lr_schedule_type\"]]\n scheduler = scheduler_func(\n optimizer=optimizer, **self.training_config[\"lr_schedule_config\"]\n )\n return [optimizer], [scheduler]\n else:\n return optimizer\n\n def setup(self, stage: Optional[str] = None):\n func = PROCESS_FUNC[self.process_type]\n ft = FILE_TYPE[self.process_type]\n if stage == \"fit\":\n self.train_dataset = func(\n Path(self.training_config[\"datadir\"], f\"train_addprop{ft}\"),\n **self.training_config,\n )\n self.training_config[\"reflection\"] = False # Turn off reflection in val.\n self.val_dataset = func(\n Path(self.training_config[\"datadir\"], f\"valid_addprop{ft}\"),\n **self.training_config,\n )\n elif stage == \"test\":\n self.test_dataset = func(\n Path(self.training_config[\"datadir\"], f\"test{ft}\"),\n **self.training_config,\n )\n else:\n raise NotImplementedError\n\n def train_dataloader(self) -> DataLoader:\n return DataLoader(\n self.train_dataset,\n self.training_config[\"bz\"],\n shuffle=True,\n 
num_workers=self.training_config[\"num_workers\"],\n collate_fn=self.train_dataset.collate_fn,\n )\n\n def val_dataloader(self) -> DataLoader:\n return DataLoader(\n self.val_dataset,\n self.training_config[\"bz\"],\n shuffle=False,\n num_workers=self.training_config[\"num_workers\"],\n collate_fn=self.val_dataset.collate_fn,\n )\n\n def test_dataloader(self) -> DataLoader:\n return DataLoader(\n self.test_dataset,\n self.training_config[\"bz\"],\n shuffle=False,\n num_workers=self.training_config[\"num_workers\"],\n collate_fn=self.test_dataset.collate_fn,\n )\n\n def compute_loss(self, batch):\n representations, conditions = batch\n loss_terms = self.ddpm.forward(\n representations,\n conditions,\n )\n info = {}\n if not self.pos_only:\n denoms = [\n (self.ddpm.pos_dim + self.ddpm.node_nfs[ii])\n * representations[ii][\"size\"]\n for ii in range(self.n_fragments)\n ]\n else:\n denoms = [\n self.ddpm.pos_dim * representations[ii][\"size\"]\n for ii in range(self.n_fragments)\n ]\n error_t_normalized = [\n loss_terms[\"error_t\"][ii] / denoms[ii] * self.scales[ii]\n for ii in range(self.n_fragments)\n ]\n if self.loss_type == \"l2\" and self.training:\n # normalize loss_t\n loss_t = torch.stack(error_t_normalized, dim=0).sum(dim=0)\n\n # normalize loss_0\n loss_0_x = [\n loss_terms[\"loss_0_x\"][ii]\n * self.scales[ii]\n / (self.ddpm.pos_dim * representations[ii][\"size\"])\n for ii in range(self.n_fragments)\n ]\n loss_0_x = torch.stack(loss_0_x, dim=0).sum(dim=0)\n loss_0_cat = torch.stack(loss_terms[\"loss_0_cat\"], dim=0).sum(dim=0)\n loss_0_charge = torch.stack(loss_terms[\"loss_0_charge\"], dim=0).sum(dim=0)\n loss_0 = loss_0_x + loss_0_cat + loss_0_charge\n\n # VLB objective or evaluation step\n else:\n # Note: SNR_weight should be negative\n error_t = [\n -self.ddpm.T * 0.5 * loss_terms[\"SNR_weight\"] * _error_t\n for _error_t in loss_terms[\"error_t\"]\n ]\n loss_t = torch.stack(error_t, dim=0).sum(dim=0)\n\n loss_0_x = torch.stack(loss_terms[\"loss_0_x\"], dim=0).sum(dim=0)\n loss_0_cat = torch.stack(loss_terms[\"loss_0_cat\"], dim=0).sum(dim=0)\n loss_0_charge = torch.stack(loss_terms[\"loss_0_charge\"], dim=0).sum(dim=0)\n loss_0 = (\n loss_0_x + loss_0_cat + loss_0_charge + loss_terms[\"neg_log_constants\"]\n )\n\n nll = loss_t + loss_0 + loss_terms[\"kl_prior\"]\n # nll = loss_t\n\n for ii in range(self.n_fragments):\n info[f\"error_t_{ii}\"] = error_t_normalized[ii].mean().item() / (\n self.scales[ii] + 1e-4\n )\n info[f\"unorm_error_t_{ii}\"] = loss_terms[\"error_t\"][ii].mean().item()\n\n # Correct for normalization on x.\n if not (self.loss_type == \"l2\" and self.training):\n nll = nll - loss_terms[\"delta_log_px\"]\n\n # Transform conditional nll into joint nll\n # Note:\n # loss = -log p(x,h|N) and log p(x,h,N) = log p(x,h|N) + log p(N)\n # Therefore, log p(x,h|N) = -loss + log p(N)\n # => loss_new = -log p(x,h,N) = loss - log p(N)\n nll = nll - loss_terms[\"log_pN\"]\n\n return nll, info\n\n def eval_inplaint_batch(\n self,\n batch: List,\n resamplings: int = 5,\n jump_length: int = 5,\n frag_fixed: List = [0, 2],\n ):\n sampling_ddpm = copy.deepcopy(self.ddpm)\n sampling_ddpm.schedule = self.sampling_schedule\n sampling_ddpm.T = self.sampling_schedule.gamma_module.timesteps\n sampling_ddpm.eval()\n\n representations, conditions = batch\n xh_fixed = [\n torch.cat(\n [repre[feature_type] for feature_type in FEATURE_MAPPING],\n dim=1,\n )\n for repre in representations\n ]\n n_samples = representations[0][\"size\"].size(0)\n fragments_nodes = [repre[\"size\"] for repre 
in representations]\n with torch.no_grad():\n out_samples, _ = sampling_ddpm.inpaint(\n n_samples=n_samples,\n fragments_nodes=fragments_nodes,\n conditions=conditions,\n return_frames=1,\n resamplings=resamplings,\n jump_length=jump_length,\n timesteps=None,\n xh_fixed=xh_fixed,\n frag_fixed=frag_fixed,\n )\n rmsds = batch_rmsd(\n fragments_nodes,\n out_samples[0],\n xh_fixed,\n idx=1,\n threshold=0.5,\n )\n return np.mean(rmsds), np.median(rmsds)\n\n def training_step(self, batch, batch_idx):\n nll, info = self.compute_loss(batch)\n loss = nll.mean(0)\n\n self.log(\"train-totloss\", loss, rank_zero_only=True)\n for k, v in info.items():\n self.log(f\"train-{k}\", v, rank_zero_only=True)\n\n if (self.current_epoch + 1) % self.eval_epochs == 0 and batch_idx == 0:\n if self.trainer.is_global_zero:\n print(\n \"evaluation on samping for training batch...\",\n batch[1].shape,\n batch_idx,\n )\n rmsd_mean, rmsd_median = self.eval_inplaint_batch(batch)\n info[\"rmsd\"], info[\"rmsd-median\"] = rmsd_mean, rmsd_median\n else:\n info[\"rmsd\"], info[\"rmsd-median\"] = np.nan, np.nan\n info[\"loss\"] = loss\n return info\n\n def _shared_eval(self, batch, batch_idx, prefix, *args):\n nll, info = self.compute_loss(batch)\n loss = nll.mean(0)\n info[\"totloss\"] = loss.item()\n\n if (self.current_epoch + 1) % self.eval_epochs == 0 and batch_idx == 0:\n if self.trainer.is_global_zero:\n print(\n \"evaluation on samping for validation batch...\",\n batch[1].shape,\n batch_idx,\n )\n info[\"rmsd\"], info[\"rmsd-median\"] = self.eval_inplaint_batch(batch)\n else:\n info[\"rmsd\"], info[\"rmsd-median\"] = np.nan, np.nan\n\n info_prefix = {}\n for k, v in info.items():\n info_prefix[f\"{prefix}-{k}\"] = v\n return info_prefix\n\n def validation_step(self, batch, batch_idx, *args):\n return self._shared_eval(batch, batch_idx, \"val\", *args)\n\n def test_step(self, batch, batch_idx, *args):\n return self._shared_eval(batch, batch_idx, \"test\", *args)\n\n def validation_epoch_end(self, val_step_outputs):\n val_epoch_metrics = average_over_batch_metrics(val_step_outputs)\n if self.trainer.is_global_zero:\n pretty_print(self.current_epoch, val_epoch_metrics, prefix=\"val\")\n val_epoch_metrics.update({\"epoch\": self.current_epoch})\n for k, v in val_epoch_metrics.items():\n self.log(k, v, sync_dist=True)\n\n def training_epoch_end(self, outputs) -> None:\n epoch_metrics = average_over_batch_metrics(\n outputs, allowed=[\"rmsd\", \"rmsd-median\"]\n )\n self.log(\"train-rmsd\", epoch_metrics[\"rmsd\"], sync_dist=True)\n self.log(\"train-rmsd-median\", epoch_metrics[\"rmsd-median\"], sync_dist=True)\n\n def configure_gradient_clipping(\n self, optimizer, optimizer_idx, gradient_clip_val, gradient_clip_algorithm\n ):\n if not self.clip_grad:\n return\n\n # Allow gradient norm to be 150% + 1.5 * stdev of the recent history.\n max_grad_norm = 1.5 * self.gradnorm_queue.mean() + 3 * self.gradnorm_queue.std()\n\n # Get current grad_norm\n params = [p for g in optimizer.param_groups for p in g[\"params\"]]\n grad_norm = utils.get_grad_norm(params)\n\n # Lightning will handle the gradient clipping\n self.clip_gradients(\n optimizer, gradient_clip_val=max_grad_norm, gradient_clip_algorithm=\"norm\"\n )\n\n if float(grad_norm) > max_grad_norm:\n self.gradnorm_queue.add(float(max_grad_norm))\n else:\n self.gradnorm_queue.add(float(grad_norm))\n\n if float(grad_norm) > max_grad_norm:\n print(\n f\"Clipped gradient with value {grad_norm:.1f} \"\n f\"while allowed {max_grad_norm:.1f}\"\n )" }, { "identifier": "ProcessedTS1x", 
"path": "oa_reactdiff/dataset/transition1x.py", "snippet": "class ProcessedTS1x(BaseDataset):\n def __init__(\n self,\n npz_path,\n center=True,\n pad_fragments=0,\n device=\"cpu\",\n zero_charge=False,\n remove_h=False,\n single_frag_only=True,\n swapping_react_prod=False,\n append_frag=False,\n reflection=False,\n use_by_ind=False,\n only_ts=False,\n confidence_model=False,\n position_key=\"positions\",\n ediff=None,\n **kwargs,\n ):\n super().__init__(\n npz_path=npz_path,\n center=center,\n device=device,\n zero_charge=zero_charge,\n remove_h=remove_h,\n )\n if confidence_model:\n use_by_ind = False\n if remove_h:\n print(\"remove_h is ignored because it is not reasonble for TS.\")\n if single_frag_only:\n single_frag_inds = np.where(\n np.array(self.raw_dataset[\"single_fragment\"]) == 1\n )[0]\n else:\n single_frag_inds = np.array(range(len(self.raw_dataset[\"single_fragment\"])))\n if use_by_ind:\n use_inds = self.raw_dataset[\"use_ind\"]\n else:\n use_inds = range(len(self.raw_dataset[\"single_fragment\"]))\n single_frag_inds = list(set(single_frag_inds).intersection(set(use_inds)))\n\n data_duplicated = copy.deepcopy(self.raw_dataset)\n for k, mapped_k in FRAG_MAPPING.items():\n for v, val in data_duplicated[k].items():\n self.raw_dataset[k][v] = [val[ii] for ii in single_frag_inds]\n if swapping_react_prod:\n mapped_val = data_duplicated[mapped_k][v]\n self.raw_dataset[k][v] += [\n mapped_val[ii] for ii in single_frag_inds\n ]\n if reflection:\n for k, mapped_k in FRAG_MAPPING.items():\n for v, val in self.raw_dataset[k].items():\n if v in [\"wB97x_6-31G(d).forces\", position_key]:\n self.raw_dataset[k][v] += [reflect_z(_val) for _val in val]\n else:\n self.raw_dataset[k][v] += val\n\n self.reactant = self.raw_dataset[\"reactant\"]\n self.transition_state = self.raw_dataset[\"transition_state\"]\n self.product = self.raw_dataset[\"product\"]\n\n self.n_fragments = pad_fragments + 3\n self.device = torch.device(device)\n n_samples = len(self.reactant[\"charges\"])\n self.n_samples = len(self.reactant[\"charges\"])\n\n self.data = {}\n repeat = 2 if swapping_react_prod else 1\n if confidence_model:\n self.data[\"target\"] = torch.tensor(\n self.raw_dataset[\"target\"] * repeat\n ).unsqueeze(1)\n self.data[\"rmsd\"] = torch.tensor(\n self.raw_dataset[\"rmsd\"] * repeat\n ).unsqueeze(1)\n if ediff is not None:\n self.data[\"ediff\"] = torch.tensor(\n self.raw_dataset[ediff][\"ediff\"] * repeat\n ).unsqueeze(1)\n if not only_ts:\n if not append_frag:\n self.process_molecules(\n \"reactant\", n_samples, idx=0, position_key=position_key\n )\n self.process_molecules(\"transition_state\", n_samples, idx=1)\n self.process_molecules(\n \"product\", n_samples, idx=2, position_key=position_key\n )\n else:\n self.process_molecules(\n \"reactant\",\n n_samples,\n idx=0,\n append_charge=0,\n position_key=position_key,\n )\n self.process_molecules(\n \"transition_state\", n_samples, idx=1, append_charge=1\n )\n self.process_molecules(\n \"product\",\n n_samples,\n idx=2,\n append_charge=0,\n position_key=position_key,\n )\n\n for idx in range(pad_fragments):\n self.patch_dummy_molecules(idx + 3)\n else:\n if not append_frag:\n self.process_molecules(\"transition_state\", n_samples, idx=0)\n else:\n self.process_molecules(\n \"transition_state\", n_samples, idx=0, append_charge=1\n )\n # for idx in range(2):\n # self.patch_dummy_molecules(idx + 1)\n\n self.data[\"condition\"] = [\n torch.zeros(\n size=(1, 1),\n dtype=torch.int64,\n device=self.device,\n )\n for _ in range(self.n_samples)\n ]" }, { 
"identifier": "DiffSchedule", "path": "oa_reactdiff/diffusion/_schedule.py", "snippet": "class DiffSchedule(nn.Module):\n def __init__(self, gamma_module: nn.Module, norm_values: Tuple[float]) -> None:\n super().__init__()\n self.gamma_module = gamma_module\n self.norm_values = norm_values\n self.check_issues_norm_values()\n\n @staticmethod\n def inflate_batch_array(array, target):\n r\"\"\"\n Inflates the batch array (array) with only a single axis\n (i.e. shape = (batch_size,), or possibly more empty axes\n (i.e. shape (batch_size, 1, ..., 1)) to match the target shape.\n \"\"\"\n target_shape = (array.size(0),) + (1,) * (len(target.size()) - 1)\n return array.view(target_shape)\n\n def sigma(self, gamma, target_tensor):\n r\"\"\"Computes sigma given gamma.\"\"\"\n return self.inflate_batch_array(torch.sqrt(torch.sigmoid(gamma)), target_tensor)\n\n def alpha(self, gamma, target_tensor):\n r\"\"\"Computes alpha given gamma.\"\"\"\n return self.inflate_batch_array(\n torch.sqrt(torch.sigmoid(-gamma)), target_tensor\n )\n\n @staticmethod\n def SNR(gamma):\n r\"\"\"Computes signal to noise ratio (alpha^2/sigma^2) given gamma.\"\"\"\n return torch.exp(-gamma)\n\n def sigma_and_alpha_t_given_s(\n self, gamma_t: Tensor, gamma_s: Tensor, target_tensor: Tensor\n ) -> tuple[Tensor, Tensor, Tensor]:\n r\"\"\"\n Computes sigma t given s, using gamma_t and gamma_s. Used during sampling.\n These are defined as:\n alpha t given s = alpha t / alpha s,\n sigma t given s = sqrt(1 - (alpha t given s) ^2 ).\n \"\"\"\n sigma2_t_given_s = self.inflate_batch_array(\n -torch.expm1(F.softplus(gamma_s) - F.softplus(gamma_t)), target_tensor\n )\n\n # alpha_t_given_s = alpha_t / alpha_s\n log_alpha2_t = F.logsigmoid(-gamma_t)\n log_alpha2_s = F.logsigmoid(-gamma_s)\n log_alpha2_t_given_s = log_alpha2_t - log_alpha2_s\n\n alpha_t_given_s = torch.exp(0.5 * log_alpha2_t_given_s)\n alpha_t_given_s = self.inflate_batch_array(alpha_t_given_s, target_tensor)\n\n sigma_t_given_s = torch.sqrt(sigma2_t_given_s)\n\n return sigma2_t_given_s, sigma_t_given_s, alpha_t_given_s\n\n def check_issues_norm_values(self, num_stdevs=8):\n zeros = torch.zeros((1, 1))\n gamma_0 = self.gamma_module(zeros)\n sigma_0 = self.sigma(gamma_0, target_tensor=zeros).item()\n\n # Checked if 1 / norm_value is still larger than 10 * standard\n # deviation.\n norm_value = self.norm_values[1]\n\n if sigma_0 * num_stdevs > 1.0 / norm_value:\n raise ValueError(\n f\"Value for normalization value {norm_value} probably too \"\n f\"large with sigma_0 {sigma_0:.5f} and \"\n f\"1 / norm_value = {1. / norm_value}\"\n )" }, { "identifier": "PredefinedNoiseSchedule", "path": "oa_reactdiff/diffusion/_schedule.py", "snippet": "class PredefinedNoiseSchedule(nn.Module):\n r\"\"\"\n Predefined noise schedule. 
Essentially creates a lookup array for predefined\n (non-learned) noise schedules.\n \"\"\"\n\n def __init__(\n self,\n noise_schedule: str,\n timesteps: int,\n precision: float,\n ):\n super().__init__()\n self.timesteps = timesteps\n\n if \"cosine\" in noise_schedule:\n splits = noise_schedule.split(\"_\")\n assert len(splits) <= 2\n power = 1 if len(splits) == 1 else float(splits[1])\n alphas2 = cosine_beta_schedule(timesteps, raise_to_power=power)\n elif \"polynomial\" in noise_schedule:\n splits = noise_schedule.split(\"_\")\n assert len(splits) == 2\n power = float(splits[1])\n alphas2 = polynomial_schedule(timesteps, s=precision, power=power)\n elif \"csin\" in noise_schedule:\n splits = noise_schedule.split(\"_\")\n assert len(splits) == 4\n start, end, tau = float(splits[1]), float(splits[2]), float(splits[3])\n alphas2 = ccosine_schedule(timesteps, start=start, end=end, tau=tau)\n elif \"linear\" in noise_schedule:\n alphas2 = linear_schedule(timesteps)\n else:\n raise ValueError(noise_schedule)\n\n # print(\"alphas2\", alphas2)\n\n sigmas2 = 1 - alphas2\n\n log_alphas2 = np.log(alphas2)\n log_sigmas2 = np.log(sigmas2)\n\n log_alphas2_to_sigmas2 = log_alphas2 - log_sigmas2\n\n # print(\"gamma\", -log_alphas2_to_sigmas2)\n\n self.gamma = torch.nn.Parameter(\n torch.from_numpy(-log_alphas2_to_sigmas2).float(), requires_grad=False\n )\n\n def forward(self, t):\n t_int = torch.round(t * self.timesteps).long()\n return self.gamma[t_int]" }, { "identifier": "FEATURE_MAPPING", "path": "oa_reactdiff/diffusion/_normalizer.py", "snippet": "FEATURE_MAPPING = [\"pos\", \"one_hot\", \"charge\"]" }, { "identifier": "batch_rmsd", "path": "oa_reactdiff/analyze/rmsd.py", "snippet": "def batch_rmsd(\n fragments_nodes: List[Tensor],\n out_samples: List[Tensor],\n xh: List[Tensor],\n idx: int = 1,\n threshold=0.5,\n):\n rmsds = []\n out_samples_use = out_samples[idx]\n xh_use = xh[idx]\n nodes = fragments_nodes[idx].long().cpu().numpy()\n start_ind, end_ind = 0, 0\n for jj, natoms in enumerate(nodes):\n end_ind += natoms\n mol1 = xh2pmg(out_samples_use[start_ind:end_ind])\n mol2 = xh2pmg(xh_use[start_ind:end_ind])\n try:\n rmsd = pymatgen_rmsd(mol1, mol2, ignore_chirality=True, threshold=threshold)\n except:\n rmsd = 1.0\n rmsds.append(min(rmsd, 1.0))\n start_ind = end_ind\n return rmsds" }, { "identifier": "assemble_sample_inputs", "path": "oa_reactdiff/utils/sampling_tools.py", "snippet": "def assemble_sample_inputs(\n atoms: List,\n device: torch.device = torch.device(\"cuda\"),\n n_samples: int = 1,\n frag_type: bool = False,\n):\n empty_site = torch.tensor([[1, 0, 0, 0, 0, 1]], device=device)\n if not frag_type:\n decoders = [\n {\n \"H\": [1, 0, 0, 0, 0, 1],\n \"C\": [0, 1, 0, 0, 0, 6],\n \"N\": [0, 0, 1, 0, 0, 7],\n \"O\": [0, 0, 0, 1, 0, 8],\n \"F\": [0, 0, 0, 0, 1, 9],\n }\n ] * 2\n else:\n decoders = [\n {\n \"H\": [1, 0, 0, 0, 0, 1, 0],\n \"C\": [0, 1, 0, 0, 0, 6, 0],\n \"N\": [0, 0, 1, 0, 0, 7, 0],\n \"O\": [0, 0, 0, 1, 0, 8, 0],\n \"F\": [0, 0, 0, 0, 1, 9, 0],\n },\n {\n \"H\": [1, 0, 0, 0, 0, 1, 1],\n \"C\": [0, 1, 0, 0, 0, 6, 1],\n \"N\": [0, 0, 1, 0, 0, 7, 1],\n \"O\": [0, 0, 0, 1, 0, 8, 1],\n \"F\": [0, 0, 0, 0, 1, 9, 1],\n },\n ]\n\n h0 = [\n torch.cat(\n [\n torch.tensor([decoders[ii % 2][atom] for atom in atoms], device=device)\n for _ in range(n_samples)\n ]\n )\n for ii in range(3)\n ]\n return h0" }, { "identifier": "write_tmp_xyz", "path": "oa_reactdiff/utils/sampling_tools.py", "snippet": "def write_tmp_xyz(\n fragments_nodes, out_samples, idx=[0], prefix=\"gen\", 
localpath=\"tmp\", ex_ind=0\n):\n TYPEMAP = {\n 0: \"react\",\n 1: \"ts\",\n 2: \"prod\",\n }\n for ii in idx:\n st = TYPEMAP[ii]\n start_ind, end_ind = 0, 0\n for jj, natoms in enumerate(fragments_nodes[0]):\n _jj = jj + ex_ind\n xyzfile = f\"{localpath}/{prefix}_{_jj}_{st}.xyz\"\n end_ind += natoms.item()\n write_single_xyz(\n xyzfile,\n natoms.item(),\n out=out_samples[ii][start_ind:end_ind],\n )\n start_ind = end_ind" }, { "identifier": "xyz2pmg", "path": "oa_reactdiff/analyze/rmsd.py", "snippet": "def xyz2pmg(xyzfile):\n xyz_converter = XYZ(mol=None)\n mol = xyz_converter.from_file(xyzfile).molecule\n return mol" }, { "identifier": "pymatgen_rmsd", "path": "oa_reactdiff/analyze/rmsd.py", "snippet": "def pymatgen_rmsd(\n mol1,\n mol2,\n ignore_chirality=False,\n threshold=0.5,\n same_order=False,\n):\n if isinstance(mol1, str):\n mol1 = xyz2pmg(mol1)\n if isinstance(mol2, str):\n mol2 = xyz2pmg(mol2)\n rmsd = rmsd_core(mol1, mol2, threshold)\n if ignore_chirality:\n coords = mol2.cart_coords\n coords[:, -1] = -coords[:, -1]\n mol2_reflect = Molecule(\n species=mol2.species,\n coords=coords,\n )\n rmsd_reflect = rmsd_core(mol1, mol2_reflect, threshold)\n rmsd = min(rmsd, rmsd_reflect)\n return rmsd" }, { "identifier": "pymatgen_rmsd", "path": "oa_reactdiff/analyze/rmsd.py", "snippet": "def pymatgen_rmsd(\n mol1,\n mol2,\n ignore_chirality=False,\n threshold=0.5,\n same_order=False,\n):\n if isinstance(mol1, str):\n mol1 = xyz2pmg(mol1)\n if isinstance(mol2, str):\n mol2 = xyz2pmg(mol2)\n rmsd = rmsd_core(mol1, mol2, threshold)\n if ignore_chirality:\n coords = mol2.cart_coords\n coords[:, -1] = -coords[:, -1]\n mol2_reflect = Molecule(\n species=mol2.species,\n coords=coords,\n )\n rmsd_reflect = rmsd_core(mol1, mol2_reflect, threshold)\n rmsd = min(rmsd, rmsd_reflect)\n return rmsd" } ]
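The two test utilities above, generate_full_eij and get_cut_graph_mask, are what the demo code further below uses to build a fully connected graph and to zero out cross-fragment edges. A small worked example on a 2-atom plus 1-atom system, with the expected values written out from the definitions given above:

from oa_reactdiff.tests.model.utils import generate_full_eij, get_cut_graph_mask

# fully connected directed graph over 3 atoms (atoms 0-1: fragment A, atom 2: fragment B)
edge_index = generate_full_eij(3)
# tensor([[0, 0, 1, 1, 2, 2],
#         [1, 2, 0, 2, 0, 1]])

# keep only edges whose two endpoints fall inside the same fragment (cut at n_cut=2)
subgraph_mask = get_cut_graph_mask(edge_index, 2)
# edges (0,1) and (1,0) stay; every edge connecting atom 2 to the other fragment is masked out:
# tensor([[1], [0], [1], [0], [0], [0]])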
import torch import py3Dmol import numpy as np import plotly.express as px import json from typing import Optional from torch import tensor from e3nn import o3 from torch_scatter import scatter_mean from oa_reactdiff.model import LEFTNet from oa_reactdiff.tests.model.utils import ( generate_full_eij, get_cut_graph_mask, ) from torch.utils.data import DataLoader from oa_reactdiff.trainer.pl_trainer import DDPMModule from oa_reactdiff.dataset import ProcessedTS1x from oa_reactdiff.diffusion._schedule import DiffSchedule, PredefinedNoiseSchedule from oa_reactdiff.diffusion._normalizer import FEATURE_MAPPING from oa_reactdiff.analyze.rmsd import batch_rmsd from oa_reactdiff.utils.sampling_tools import ( assemble_sample_inputs, write_tmp_xyz, ) from glob import glob from oa_reactdiff.analyze.rmsd import xyz2pmg, pymatgen_rmsd from pymatgen.core import Molecule from collections import OrderedDict from sklearn.cluster import KMeans from glob import glob from pymatgen.io.xyz import XYZ from openbabel import pybel from oa_reactdiff.analyze.rmsd import pymatgen_rmsd
13,278
use_by_ind=True, ) loader = DataLoader( dataset, batch_size=1, shuffle=False, num_workers=0, collate_fn=dataset.collate_fn ) itl = iter(loader) idx = -1 for _ in range(4): representations, res = next(itl) idx += 1 n_samples = representations[0]["size"].size(0) fragments_nodes = [ repre["size"] for repre in representations ] conditions = torch.tensor([[0] for _ in range(n_samples)], device=device) new_order_react = torch.randperm(representations[0]["size"].item()) for k in ["pos", "one_hot", "charge"]: representations[0][k] = representations[0][k][new_order_react] xh_fixed = [ torch.cat( [repre[feature_type] for feature_type in FEATURE_MAPPING], dim=1, ) for repre in representations ] out_samples, out_masks = ddpm_trainer.ddpm.inpaint( n_samples=n_samples, fragments_nodes=fragments_nodes, conditions=conditions, return_frames=1, resamplings=5, jump_length=5, timesteps=None, xh_fixed=xh_fixed, frag_fixed=[0, 2], ) rmsds = batch_rmsd( fragments_nodes, out_samples[0], xh_fixed, idx=1, ) write_tmp_xyz( fragments_nodes, out_samples[0], idx=[0, 1, 2], localpath="demo/inpainting" ) rmsds = [min(1, _x) for _x in rmsds] [(ii, round(rmsd, 2)) for ii, rmsd in enumerate(rmsds)], np.mean(rmsds), np.median(rmsds) print("Cell 33, Done") def draw_reaction(react_path: str, idx: int = 0, prefix: str = "gen") -> py3Dmol.view: """Draw the {reactant, transition state, product} structures of a reaction Args: react_path (str): path to the reaction. idx (int, optional): index for the generated reaction. Defaults to 0. prefix (str, optional): prefix for distinguishing true sample and generated structure. Defaults to "gen". Returns: py3Dmol.view: _description_ """ with open(f"{react_path}/{prefix}_{idx}_react.xyz", "r") as fo: natoms = int(fo.readline()) * 3 mol = f"{natoms}\n\n" for ii, t in enumerate(["react", "ts", "prod"]): pmatg_mol = xyz2pmg(f"{react_path}/{prefix}_{idx}_{t}.xyz") pmatg_mol_prime = Molecule( species=pmatg_mol.atomic_numbers, coords=pmatg_mol.cart_coords + 8 * ii, ) mol += "\n".join(pmatg_mol_prime.to(fmt="xyz").split("\n")[2:]) + "\n" viewer = py3Dmol.view(1024, 576) viewer.addModel(mol, "xyz") viewer.setStyle({'stick': {}, "sphere": {"radius": 0.3}}) viewer.zoomTo() return viewer opt_ts_path = "./demo/example-3/opt_ts/" opt_ts_xyzs = glob(f"{opt_ts_path}/*ts.opt.xyz") order_dict = {} for xyz in opt_ts_xyzs: order_dict.update( {int(xyz.split("/")[-1].split(".")[0]): xyz} ) order_dict = OrderedDict(sorted(order_dict.items())) opt_ts_xyzs = [] ind_dict = {} for ii, v in enumerate(order_dict.values()): opt_ts_xyzs.append(v) ind_dict.update( {ii: v} ) n_ts = len(opt_ts_xyzs) rmsd_mat = np.ones((n_ts, n_ts)) * -2.5 for ii in range(n_ts): for jj in range(ii+1, n_ts): try: rmsd_mat[ii, jj] = np.log10(
# --- Import and define some helper functions ---- default_float = torch.float64 torch.set_default_dtype(default_float) # use double precision so the tests are more accurate def remove_mean_batch( x: tensor, indices: Optional[tensor] = None ) -> tensor: """Remove the per-batch mean from x Args: x (tensor): input tensor. indices (Optional[tensor], optional): batch indices. Defaults to None. Returns: tensor: output tensor with batch mean as 0. """ if indices == None: return x - torch.mean(x, dim=0) mean = scatter_mean(x, indices, dim=0) x = x - mean[indices] return x def draw_in_3dmol(mol: str, fmt: str = "xyz") -> py3Dmol.view: """Draw a molecule Args: mol (str): str content of molecule. fmt (str, optional): format. Defaults to "xyz". Returns: py3Dmol.view: output viewer """ viewer = py3Dmol.view(1024, 576) viewer.addModel(mol, fmt) viewer.setStyle({'stick': {}, "sphere": {"radius": 0.36}}) viewer.zoomTo() return viewer def assemble_xyz(z: list, pos: tensor) -> str: """Assemble element symbols and positions into an xyz-format string Args: z (list): chemical elements pos (tensor): 3D coordinates Returns: str: xyz string """ natoms =len(z) xyz = f"{natoms}\n\n" for _z, _pos in zip(z, pos.numpy()): xyz += f"{_z}\t" + "\t".join([str(x) for x in _pos]) + "\n" return xyz num_layers = 2 hidden_channels = 8 in_hidden_channels = 4 num_radial = 4 model = LEFTNet( num_layers=num_layers, hidden_channels=hidden_channels, in_hidden_channels=in_hidden_channels, num_radial=num_radial, object_aware=False, ) sum(p.numel() for p in model.parameters() if p.requires_grad) h = torch.rand(3, in_hidden_channels) z = ["O", "H", "H"] pos = tensor([ [0, 0, 0], [1, 0, 0], [0, 1, 0], ]).double() # for convenience, the H-O-H angle is set to 90 degrees here edge_index = tensor([ [0, 0, 1, 1, 2, 2], [1, 2, 0, 2, 0, 1] ]).long() # fully connected graph; the edges here are undirected _h, _pos, __ = model.forward( h=h, pos=remove_mean_batch(pos), edge_index=edge_index, ) rot = o3.rand_matrix() pos_rot = torch.matmul(pos, rot).double() _h_rot, _pos_rot, __ = model.forward( h=h, pos=remove_mean_batch(pos_rot), edge_index=edge_index, ) torch.max( torch.abs( _h - _h_rot ) ) # h should stay invariant after rotation torch.max( torch.abs( torch.matmul(_pos, rot).double() - _pos_rot ) ) # pos should rotate along with the input print("At Cell 9, Done.") # --- Cell 9 --- ns = [3, ] + [2, 1] # reactant: 3 atoms (H2O); products: 2 atoms (H2) and 1 atom (O radical) ntot = np.sum(ns) mask = tensor([0, 0, 0, 1, 1, 1]) # used to distinguish reactant from products z = ["O", "H", "H"] + ["H", "H", "O"] pos_react = tensor([ [0, 0, 0], [1, 0, 0], [0, 1, 0], ]).double() # for convenience, the H-O-H angle is set to 90 degrees here pos_prod = tensor([ [0, 3, -0.4], [0, 3, 0.4], [0, -3, 0], ]) # keep H2 and the O radical well separated pos = torch.cat( [pos_react, pos_prod], dim=0, ) # concatenate h = torch.rand(ntot, in_hidden_channels) edge_index = generate_full_eij(ntot) edge_index _h, _pos, __ = model.forward( h=h, pos=remove_mean_batch(pos, mask), edge_index=edge_index, ) rot = o3.rand_matrix() pos_react_rot = torch.matmul(pos_react, rot).double() pos_rot = torch.cat( [pos_react_rot, pos_prod], dim=0, ) # concatenate the rotated H2O with the unrotated H2 and O radical _h_rot, _pos_rot, __ = model.forward( h=h, pos=remove_mean_batch(pos_rot, mask), edge_index=edge_index, ) torch.max( torch.abs( _h - _h_rot ) ) # h should stay invariant after rotation _pos_rot_prime = torch.cat( [ torch.matmul(_pos[:3], rot), _pos[3:] ] ) torch.max( torch.abs( _pos_rot_prime - _pos_rot ) ) # pos should rotate along with the input print("At Cell 16, Done.") model_oa = LEFTNet( num_layers=num_layers, hidden_channels=hidden_channels, in_hidden_channels=in_hidden_channels, num_radial=num_radial, object_aware=True, # use the object-aware model ) subgraph_mask = get_cut_graph_mask(edge_index, 3) # atoms 0-2 belong to the reactant edge_index.T[torch.where(subgraph_mask.squeeze()>0)[0]] _h, _pos, __ = model_oa.forward( h=h, pos=remove_mean_batch(pos, mask), edge_index=edge_index,
subgraph_mask=subgraph_mask, ) rot = o3.rand_matrix() pos_react_rot = torch.matmul(pos_react, rot).double() pos_rot = torch.cat( [pos_react_rot, pos_prod], dim=0, ) _h_rot, _pos_rot, __ = model_oa.forward( h=h, pos=remove_mean_batch(pos_rot, mask), edge_index=edge_index, subgraph_mask=subgraph_mask, ) torch.max( torch.abs( _h - _h_rot ) ) # h should stay invariant after rotation _pos_rot_prime = torch.cat( [ torch.matmul(_pos[:3], rot), _pos[3:] ] ) torch.max( torch.abs( _pos_rot_prime - _pos_rot ) ) # pos should rotate along with the input print("Cell 22, done") device = torch.device("cpu") if not torch.cuda.is_available() else torch.device("cuda") ddpm_trainer = DDPMModule.load_from_checkpoint( checkpoint_path="./pretrained-ts1x-diff.ckpt", map_location=device, ) ddpm_trainer = ddpm_trainer.to(device) noise_schedule: str = "polynomial_2" timesteps: int = 150 precision: float = 1e-5 gamma_module = PredefinedNoiseSchedule( noise_schedule=noise_schedule, timesteps=timesteps, precision=precision, ) schedule = DiffSchedule( gamma_module=gamma_module, norm_values=ddpm_trainer.ddpm.norm_values ) ddpm_trainer.ddpm.schedule = schedule ddpm_trainer.ddpm.T = timesteps ddpm_trainer = ddpm_trainer.to(device) dataset = ProcessedTS1x( npz_path="./oa_reactdiff/data/transition1x/train.pkl", center=True, pad_fragments=0, device=device, zero_charge=False, remove_h=False, single_frag_only=False, swapping_react_prod=False, use_by_ind=True, ) loader = DataLoader( dataset, batch_size=1, shuffle=False, num_workers=0, collate_fn=dataset.collate_fn ) itl = iter(loader) idx = -1 for _ in range(4): representations, res = next(itl) idx += 1 n_samples = representations[0]["size"].size(0) fragments_nodes = [ repre["size"] for repre in representations ] conditions = torch.tensor([[0] for _ in range(n_samples)], device=device) new_order_react = torch.randperm(representations[0]["size"].item()) for k in ["pos", "one_hot", "charge"]: representations[0][k] = representations[0][k][new_order_react] xh_fixed = [ torch.cat( [repre[feature_type] for feature_type in FEATURE_MAPPING], dim=1, ) for repre in representations ] out_samples, out_masks = ddpm_trainer.ddpm.inpaint( n_samples=n_samples, fragments_nodes=fragments_nodes, conditions=conditions, return_frames=1, resamplings=5, jump_length=5, timesteps=None, xh_fixed=xh_fixed, frag_fixed=[0, 2], ) rmsds = batch_rmsd( fragments_nodes, out_samples[0], xh_fixed, idx=1, ) write_tmp_xyz( fragments_nodes, out_samples[0], idx=[0, 1, 2], localpath="demo/inpainting" ) rmsds = [min(1, _x) for _x in rmsds] [(ii, round(rmsd, 2)) for ii, rmsd in enumerate(rmsds)], np.mean(rmsds), np.median(rmsds) print("Cell 33, Done") def draw_reaction(react_path: str, idx: int = 0, prefix: str = "gen") -> py3Dmol.view: """Draw the {reactant, transition state, product} structures of a reaction Args: react_path (str): path to the reaction. idx (int, optional): index for the generated reaction. Defaults to 0. prefix (str, optional): prefix for distinguishing true sample and generated structure. Defaults to "gen".
Returns: py3Dmol.view: _description_ """ with open(f"{react_path}/{prefix}_{idx}_react.xyz", "r") as fo: natoms = int(fo.readline()) * 3 mol = f"{natoms}\n\n" for ii, t in enumerate(["react", "ts", "prod"]): pmatg_mol = xyz2pmg(f"{react_path}/{prefix}_{idx}_{t}.xyz") pmatg_mol_prime = Molecule( species=pmatg_mol.atomic_numbers, coords=pmatg_mol.cart_coords + 8 * ii, ) mol += "\n".join(pmatg_mol_prime.to(fmt="xyz").split("\n")[2:]) + "\n" viewer = py3Dmol.view(1024, 576) viewer.addModel(mol, "xyz") viewer.setStyle({'stick': {}, "sphere": {"radius": 0.3}}) viewer.zoomTo() return viewer opt_ts_path = "./demo/example-3/opt_ts/" opt_ts_xyzs = glob(f"{opt_ts_path}/*ts.opt.xyz") order_dict = {} for xyz in opt_ts_xyzs: order_dict.update( {int(xyz.split("/")[-1].split(".")[0]): xyz} ) order_dict = OrderedDict(sorted(order_dict.items())) opt_ts_xyzs = [] ind_dict = {} for ii, v in enumerate(order_dict.values()): opt_ts_xyzs.append(v) ind_dict.update( {ii: v} ) n_ts = len(opt_ts_xyzs) rmsd_mat = np.ones((n_ts, n_ts)) * -2.5 for ii in range(n_ts): for jj in range(ii+1, n_ts): try: rmsd_mat[ii, jj] = np.log10(
pymatgen_rmsd(
13
2023-10-30 02:53:38+00:00
16k
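The first record ends with the next line to be completed, a call to pymatgen_rmsd, whose context snippet above handles chirality by reflecting one structure through the xy-plane (coords[:, -1] = -coords[:, -1]) and keeping the smaller RMSD. A toy, numpy-only illustration of that reflection trick; the naive same-order toy_rmsd below is a hypothetical stand-in for the repository's matcher-based rmsd_core:

import numpy as np

def toy_rmsd(a: np.ndarray, b: np.ndarray) -> float:
    # Naive RMSD assuming identical atom ordering and no alignment
    # (the repository uses a pymatgen molecule matcher instead).
    return float(np.sqrt(np.mean(np.sum((a - b) ** 2, axis=1))))

def toy_rmsd_ignore_chirality(coords1: np.ndarray, coords2: np.ndarray) -> float:
    reflected = coords2.copy()
    reflected[:, -1] = -reflected[:, -1]  # mirror through the xy-plane
    return min(toy_rmsd(coords1, coords2), toy_rmsd(coords1, reflected))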
Weitheskmt/WeiDMD
build/lib/weidmd/bopdmd.py
[ { "identifier": "DMDBase", "path": "build/lib/weidmd/dmdbase.py", "snippet": "class DMDBase:\n \"\"\"\n Dynamic Mode Decomposition base class.\n\n :param svd_rank: the rank for the truncation; If 0, the method computes the\n optimal rank and uses it for truncation; if positive interger, the\n method uses the argument for the truncation; if float between 0 and 1,\n the rank is the number of the biggest singular values that are needed\n to reach the 'energy' specified by `svd_rank`; if -1, the method does\n not compute truncation.\n :type svd_rank: int or float\n :param int tlsq_rank: rank truncation computing Total Least Square. Default\n is 0, that means no truncation.\n :param bool exact: flag to compute either exact DMD or projected DMD.\n Default is False.\n :param opt: If True, amplitudes are computed like in optimized DMD (see\n :func:`~dmdbase.DMDBase._compute_amplitudes` for reference). If\n False, amplitudes are computed following the standard algorithm. If\n `opt` is an integer, it is used as the (temporal) index of the snapshot\n used to compute DMD modes amplitudes (following the standard\n algorithm).\n The reconstruction will generally be better in time instants near the\n chosen snapshot; however increasing `opt` may lead to wrong results\n when the system presents small eigenvalues. For this reason a manual\n selection of the number of eigenvalues considered for the analyisis may\n be needed (check `svd_rank`). Also setting `svd_rank` to a value\n between 0 and 1 may give better results. Default is False.\n :type opt: bool or int\n :param rescale_mode: Scale Atilde as shown in\n 10.1016/j.jneumeth.2015.10.010 (section 2.4) before computing its\n eigendecomposition. None means no rescaling, 'auto' means automatic\n rescaling using singular values, otherwise the scaling factors.\n :type rescale_mode: {'auto'} or None or numpy.ndarray\n :param bool forward_backward: If True, the low-rank operator is computed\n like in fbDMD (reference: https://arxiv.org/abs/1507.02264). Default is\n False.\n :param sorted_eigs: Sort eigenvalues (and modes/dynamics accordingly) by\n magnitude if `sorted_eigs='abs'`, by real part (and then by imaginary\n part to break ties) if `sorted_eigs='real'`. 
Default: False.\n :type sorted_eigs: {'real', 'abs'} or False\n :param tikhonov_regularization: Tikhonov parameter for the regularization.\n If `None`, no regularization is applied, if `float`, it is used as the\n :math:`\\\\lambda` tikhonov parameter.\n :type tikhonov_regularization: int or float\n\n :cvar dict original_time: dictionary that contains information about the\n time window where the system is sampled:\n\n - `t0` is the time of the first input snapshot;\n - `tend` is the time of the last input snapshot;\n - `dt` is the delta time between the snapshots.\n\n :cvar dict dmd_time: dictionary that contains information about the time\n window where the system is reconstructed:\n\n - `t0` is the time of the first approximated solution;\n - `tend` is the time of the last approximated solution;\n - `dt` is the delta time between the approximated solutions.\n\n \"\"\"\n\n def __init__(\n self,\n svd_rank=0,\n tlsq_rank=0,\n exact=False,\n opt=False,\n rescale_mode=None,\n forward_backward=False,\n sorted_eigs=False,\n tikhonov_regularization=None,\n ):\n self._Atilde = DMDOperator(\n svd_rank=svd_rank,\n exact=exact,\n rescale_mode=rescale_mode,\n forward_backward=forward_backward,\n sorted_eigs=sorted_eigs,\n tikhonov_regularization=tikhonov_regularization,\n )\n\n self._tlsq_rank = tlsq_rank\n self._original_time = None\n self._dmd_time = None\n self._opt = opt\n self._exact = exact\n\n self._b = None # amplitudes\n self._snapshots_holder = None\n\n self._modes_activation_bitmask_proxy = None\n\n @property\n def dmd_timesteps(self):\n \"\"\"\n Get the timesteps of the reconstructed states.\n\n :return: the time intervals of the original snapshots.\n :rtype: numpy.ndarray\n \"\"\"\n return np.arange(\n self.dmd_time[\"t0\"],\n self.dmd_time[\"tend\"] + self.dmd_time[\"dt\"],\n self.dmd_time[\"dt\"],\n )\n\n @property\n def original_timesteps(self):\n \"\"\"\n Get the timesteps of the original snapshot.\n\n :return: the time intervals of the original snapshots.\n :rtype: numpy.ndarray\n \"\"\"\n return np.arange(\n self.original_time[\"t0\"],\n self.original_time[\"tend\"] + self.original_time[\"dt\"],\n self.original_time[\"dt\"],\n )\n\n @property\n def modes(self):\n \"\"\"\n Get the matrix containing the DMD modes, stored by column.\n\n :return: the matrix containing the DMD modes.\n :rtype: numpy.ndarray\n \"\"\"\n if self.fitted:\n if not self._modes_activation_bitmask_proxy:\n self._allocate_modes_bitmask_proxy()\n # if the value is still None, it means that we cannot create\n # the proxy at the moment\n if not self._modes_activation_bitmask_proxy:\n return self.operator.modes\n return self._modes_activation_bitmask_proxy.modes\n\n @property\n def operator(self):\n \"\"\"\n Get the instance of DMDOperator.\n\n :return: the instance of DMDOperator\n :rtype: DMDOperator\n \"\"\"\n return self._Atilde\n\n @property\n def eigs(self):\n \"\"\"\n Get the eigenvalues of A tilde.\n\n :return: the eigenvalues from the eigendecomposition of `atilde`.\n :rtype: numpy.ndarray\n \"\"\"\n if self.fitted:\n if not self._modes_activation_bitmask_proxy:\n self._allocate_modes_bitmask_proxy()\n # if the value is still None, it means that we cannot create\n # the proxy at the moment\n if not self._modes_activation_bitmask_proxy:\n return self.operator.eigenvalues\n return self._modes_activation_bitmask_proxy.eigs\n\n @property\n def dynamics(self):\n \"\"\"\n Get the time evolution of each mode.\n\n .. 
math::\n\n \\\\mathbf{x}(t) \\\\approx\n \\\\sum_{k=1}^{r} \\\\boldsymbol{\\\\phi}_{k} \\\\exp \\\\left( \\\\omega_{k} t\n \\\\right) b_{k} = \\\\sum_{k=1}^{r} \\\\boldsymbol{\\\\phi}_{k} \\\\left(\n \\\\lambda_{k} \\\\right)^{\\\\left( t / \\\\Delta t \\\\right)} b_{k}\n\n :return: the matrix that contains all the time evolution, stored by\n row.\n :rtype: numpy.ndarray\n \"\"\"\n temp = np.repeat(\n self.eigs[:, None], self.dmd_timesteps.shape[0], axis=1\n )\n tpow = (\n self.dmd_timesteps - self.original_time[\"t0\"]\n ) // self.original_time[\"dt\"]\n\n # The new formula is x_(k+j) = \\Phi \\Lambda^k \\Phi^(-1) x_j.\n # Since j is fixed, for a given snapshot \"u\" we have the following\n # formula:\n # x_u = \\Phi \\Lambda^{u-j} \\Phi^(-1) x_j\n # Therefore tpow must be scaled appropriately.\n tpow = self._translate_eigs_exponent(tpow)\n\n return np.power(temp, tpow) * self.amplitudes[:, None]\n\n def _translate_eigs_exponent(self, tpow):\n \"\"\"\n Transforms the exponent of the eigenvalues in the dynamics formula\n according to the selected value of `self._opt` (check the documentation\n for `opt` in :func:`__init__ <dmdbase.DMDBase.__init__>`).\n\n :param tpow: the exponent(s) of Sigma in the original DMD formula.\n :type tpow: int or np.ndarray\n :return: the exponent(s) adjusted according to `self._opt`\n :rtype: int or np.ndarray\n \"\"\"\n\n if isinstance(self._opt, bool):\n amplitudes_snapshot_index = 0\n else:\n amplitudes_snapshot_index = self._opt\n\n if amplitudes_snapshot_index < 0:\n # we take care of negative indexes: -n becomes T - n\n return tpow - (self.snapshots.shape[1] + amplitudes_snapshot_index)\n else:\n return tpow - amplitudes_snapshot_index\n\n @property\n def reconstructed_data(self):\n \"\"\"\n Get the reconstructed data.\n\n :return: the matrix that contains the reconstructed snapshots.\n :rtype: numpy.ndarray\n \"\"\"\n return self.modes.dot(self.dynamics)\n\n @property\n def snapshots(self):\n \"\"\"\n Get the input data (space flattened).\n\n :return: the matrix that contains the flattened snapshots.\n :rtype: numpy.ndarray\n \"\"\"\n if self._snapshots_holder:\n return self._snapshots_holder.snapshots\n return None\n\n @property\n def snapshots_shape(self):\n \"\"\"\n Get the original input snapshot shape.\n\n :return: input snapshots shape.\n :rtype: tuple\n \"\"\"\n if self._snapshots_holder:\n return self._snapshots_holder.snapshots_shape\n return None\n\n @property\n def frequency(self):\n \"\"\"\n Get the amplitude spectrum.\n\n :return: the array that contains the frequencies of the eigenvalues.\n :rtype: numpy.ndarray\n \"\"\"\n return np.log(self.eigs).imag / (2 * np.pi * self.original_time[\"dt\"])\n\n @property\n def growth_rate(self): # To check\n \"\"\"\n Get the growth rate values relative to the modes.\n\n :return: the Floquet values\n :rtype: numpy.ndarray\n \"\"\"\n return self.eigs.real / self.original_time[\"dt\"]\n\n @property\n def amplitudes(self):\n \"\"\"\n Get the coefficients that minimize the error between the original\n system and the reconstructed one. 
For futher information, see\n `dmdbase._compute_amplitudes`.\n\n :return: the array that contains the amplitudes coefficient.\n :rtype: numpy.ndarray\n \"\"\"\n if self.fitted:\n if not self._modes_activation_bitmask_proxy:\n self._allocate_modes_bitmask_proxy()\n return self._modes_activation_bitmask_proxy.amplitudes\n\n @property\n def fitted(self):\n \"\"\"Check whether this DMD instance has been fitted.\n\n :return: `True` is the instance has been fitted, `False` otherwise.\n :rtype: bool\n \"\"\"\n try:\n return self.operator.modes is not None\n except (ValueError, AttributeError):\n return False\n\n @property\n def modes_activation_bitmask(self):\n \"\"\"\n Get the bitmask which controls which DMD modes are enabled at the\n moment in this DMD instance.\n\n The DMD instance must be fitted before this property becomes valid.\n After :func:`fit` is called, the defalt value of\n `modes_activation_bitmask` is an array of `True` values of the same\n shape of :func:`amplitudes`.\n\n The array returned is read-only (this allow us to react appropriately\n to changes in the bitmask). In order to modify the bitmask you need to\n set the field to a brand-new value (see example below).\n\n Example:\n\n .. code-block:: python\n\n >>> # this is an error\n >>> dmd.modes_activation_bitmask[[1,2]] = False\n ValueError: assignment destination is read-only\n >>> tmp = np.array(dmd.modes_activation_bitmask)\n >>> tmp[[1,2]] = False\n >>> dmd.modes_activation_bitmask = tmp\n\n :return: The DMD modes activation bitmask.\n :rtype: numpy.ndarray\n \"\"\"\n # check that the DMD was fitted\n if not self.fitted:\n raise RuntimeError(\"This DMD instance has not been fitted yet.\")\n\n if not self._modes_activation_bitmask_proxy:\n self._allocate_modes_bitmask_proxy()\n\n bitmask = self._modes_activation_bitmask_proxy.old_bitmask\n # make sure that the array is immutable\n bitmask.flags.writeable = False\n return bitmask\n\n @modes_activation_bitmask.setter\n def modes_activation_bitmask(self, value):\n # check that the DMD was fitted\n if not self.fitted:\n raise RuntimeError(\"This DMD instance has not been fitted yet.\")\n\n value = np.array(value)\n if value.dtype != bool:\n raise RuntimeError(\n \"Unxpected dtype, expected bool, got {}.\".format(value.dtype)\n )\n\n # check that the shape is correct\n if value.shape != self.modes_activation_bitmask.shape:\n raise ValueError(\n \"Expected shape {}, got {}\".format(\n self.modes_activation_bitmask.shape, value.shape\n )\n )\n\n self._modes_activation_bitmask_proxy.change_bitmask(value)\n\n def _allocate_modes_bitmask_proxy(self):\n \"\"\"\n Utility method which allocates the activation bitmask proxy using the\n quantities that are currently available in this DMD instance. Fails\n quietly if the amplitudes are not set.\n \"\"\"\n if hasattr(self, \"_b\") and self._b is not None:\n self._modes_activation_bitmask_proxy = ActivationBitmaskProxy(\n self.operator, self._b\n )\n\n def __getitem__(self, key):\n \"\"\"\n Restrict the DMD modes used by this instance to a subset of indexes\n specified by keys. The value returned is a shallow copy of this DMD\n instance, with a different value in :func:`modes_activation_bitmask`.\n Therefore assignments to attributes are not reflected into the original\n instance.\n\n However the DMD instance returned should not be used for low-level\n manipulations on DMD modes, since the underlying DMD operator is shared\n with the original instance. 
For this reasons modifications to NumPy\n arrays may result in unwanted and unspecified situations which should\n be avoided in principle.\n\n :param key: An index (integer), slice or list of indexes.\n :type key: int or slice or list or np.ndarray\n :return: A shallow copy of this DMD instance having only a subset of\n DMD modes which are those indexed by `key`.\n :rtype: DMDBase\n \"\"\"\n\n if isinstance(key, (slice, int, list, np.ndarray)):\n filter_function = lambda x: isinstance(x, int)\n\n if isinstance(key, (list, np.ndarray)):\n if not all(map(filter_function, key)):\n raise ValueError(\n \"Invalid argument type, expected a slice, an int, or \"\n \"a list of indexes.\"\n )\n # no repeated elements\n if len(key) != len(set(key)):\n raise ValueError(\"Repeated indexes are not supported.\")\n else:\n raise ValueError(\n \"Invalid argument type, expected a slice, an int, or a list \"\n \"of indexes, got {}\".format(type(key))\n )\n\n mask = np.full(self.modes_activation_bitmask.shape, False)\n mask[key] = True\n\n shallow_copy = copy(self)\n shallow_copy._allocate_modes_bitmask_proxy()\n shallow_copy.modes_activation_bitmask = mask\n\n return shallow_copy\n\n @property\n def original_time(self):\n \"\"\"\n A dictionary which contains information about the time window used to\n fit this DMD instance.\n\n Inside the dictionary:\n\n ====== ====================================================================================\n Key Value\n ====== ====================================================================================\n `t0` Time of the first input snapshot (0 by default).\n `tend` Time of the last input snapshot (usually corresponds to the number of snapshots).\n `dt` Timestep between two snapshots (1 by default).\n ====== ====================================================================================\n\n :return: A dict which contains info about the input time frame.\n :rtype: dict\n \"\"\"\n if self._original_time is None:\n raise RuntimeError(\n \"\"\"\n_set_initial_time_dictionary() has not been called, did you call fit()?\"\"\"\n )\n return self._original_time\n\n @property\n def dmd_time(self):\n \"\"\"\n A dictionary which contains information about the time window used to\n reconstruct/predict using this DMD instance. By default this is equal\n to :func:`original_time`.\n\n Inside the dictionary:\n\n ====== ====================================================================================\n Key Value\n ====== ====================================================================================\n `t0` Time of the first output snapshot.\n `tend` Time of the last output snapshot.\n `dt` Timestep between two snapshots.\n ====== ====================================================================================\n\n :return: A dict which contains info about the input time frame.\n :rtype: dict\n \"\"\"\n if self._dmd_time is None:\n raise RuntimeError(\n \"\"\"\n_set_initial_time_dictionary() has not been called, did you call fit()?\"\"\"\n )\n return self._dmd_time\n\n @dmd_time.setter\n def dmd_time(self, value):\n self._dmd_time = deepcopy(value)\n\n def _set_initial_time_dictionary(self, time_dict):\n \"\"\"\n Set the initial values for the class fields `time_dict` and\n `original_time`. 
This is usually called in `fit()` and never again.\n\n :param time_dict: Initial time dictionary for this DMD instance.\n :type time_dict: dict\n \"\"\"\n if not (\n \"t0\" in time_dict and \"tend\" in time_dict and \"dt\" in time_dict\n ):\n raise ValueError(\n 'time_dict must contain the keys \"t0\", \"tend\" and \"dt\".'\n )\n if len(time_dict) > 3:\n raise ValueError(\n 'time_dict must contain only the keys \"t0\", \"tend\" and \"dt\".'\n )\n\n self._original_time = DMDTimeDict(dict(time_dict))\n self._dmd_time = DMDTimeDict(dict(time_dict))\n\n def fit(self, X):\n \"\"\"\n Abstract method to fit the snapshots matrices.\n\n Not implemented, it has to be implemented in subclasses.\n \"\"\"\n name = self.__class__.__name__\n msg = f\"Subclass must implement abstract method {name}.fit\"\n raise NotImplementedError(msg)\n\n def _reset(self):\n \"\"\"\n Reset this instance. Should be called in :func:`fit`.\n \"\"\"\n self._modes_activation_bitmask_proxy = None\n self._b = None\n self._snapshots_holder = None\n\n def save(self, fname):\n \"\"\"\n Save the object to `fname` using the pickle module.\n\n :param str fname: the name of file where the reduced order model will\n be saved.\n\n Example:\n\n >>> from pydmd import DMD\n >>> dmd = DMD(...) # Construct here the rom\n >>> dmd.fit(...)\n >>> dmd.save('pydmd.dmd')\n \"\"\"\n with open(fname, \"wb\") as output:\n pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)\n\n @staticmethod\n def load(fname):\n \"\"\"\n Load the object from `fname` using the pickle module.\n\n :return: The `ReducedOrderModel` loaded\n\n Example:\n\n >>> from pydmd import DMD\n >>> dmd = DMD.load('pydmd.dmd')\n >>> print(dmd.reconstructed_data)\n \"\"\"\n with open(fname, \"rb\") as output:\n return pickle.load(output)\n\n def _optimal_dmd_matrices(self):\n # compute the vandermonde matrix\n vander = np.vander(self.eigs, len(self.dmd_timesteps), True)\n\n P = np.multiply(\n np.dot(self.modes.conj().T, self.modes),\n np.conj(np.dot(vander, vander.conj().T)),\n )\n\n if self._exact:\n q = np.conj(\n np.diag(\n np.linalg.multi_dot(\n [vander, self.snapshots.conj().T, self.modes]\n )\n )\n )\n else:\n _, s, V = compute_svd(self.snapshots[:, :-1], self.modes.shape[-1])\n\n q = np.conj(\n np.diag(\n np.linalg.multi_dot(\n [\n vander[:, :-1],\n V,\n np.diag(s).conj(),\n self.operator.eigenvectors,\n ]\n )\n )\n )\n\n return P, q\n\n def _compute_amplitudes(self):\n \"\"\"\n Compute the amplitude coefficients. If `self._opt` is False the\n amplitudes are computed by minimizing the error between the modes and\n the first snapshot; if `self._opt` is True the amplitudes are computed\n by minimizing the error between the modes and all the snapshots, at the\n expense of bigger computational cost.\n\n This method uses the class variables self.snapshots (for the\n snapshots), self.modes and self.eigs.\n\n :return: the amplitudes array\n :rtype: numpy.ndarray\n\n References for optimal amplitudes:\n Jovanovic et al. 
2014, Sparsity-promoting dynamic mode decomposition,\n https://hal-polytechnique.archives-ouvertes.fr/hal-00995141/document\n \"\"\"\n if isinstance(self._opt, bool) and self._opt:\n # b optimal\n a = np.linalg.solve(*self._optimal_dmd_matrices())\n else:\n if isinstance(self._opt, bool):\n amplitudes_snapshot_index = 0\n else:\n amplitudes_snapshot_index = self._opt\n\n a = np.linalg.lstsq(\n self.modes,\n self.snapshots.T[amplitudes_snapshot_index],\n rcond=None,\n )[0]\n\n return a" }, { "identifier": "DMDOperator", "path": "build/lib/weidmd/dmdoperator.py", "snippet": "class DMDOperator:\n \"\"\"\n Dynamic Mode Decomposition standard operator class. Non-standard ways of\n computing the low-rank Atilde operator should be coded into subclasses of\n this class.\n\n :param svd_rank: the rank for the truncation; If 0, the method computes the\n optimal rank and uses it for truncation; if positive interger, the\n method uses the argument for the truncation; if float between 0 and 1,\n the rank is the number of the biggest singular values that are needed\n to reach the 'energy' specified by `svd_rank`; if -1, the method does\n not compute truncation.\n :type svd_rank: int or float\n :param bool exact: flag to compute either exact DMD or projected DMD.\n Default is False.\n :param rescale_mode: Scale Atilde as shown in\n 10.1016/j.jneumeth.2015.10.010 (section 2.4) before computing its\n eigendecomposition. None means no rescaling, 'auto' means automatic\n rescaling using singular values, otherwise the scaling factors.\n :type rescale_mode: {'auto'} or None or numpy.ndarray\n :param bool forward_backward: If True, the low-rank operator is computed\n like in fbDMD (reference: https://arxiv.org/abs/1507.02264). Default is\n False.\n :param sorted_eigs: Sort eigenvalues (and modes/dynamics accordingly) by\n magnitude if `sorted_eigs='abs'`, by real part (and then by imaginary\n part to break ties) if `sorted_eigs='real'`. 
Default: False.\n :type sorted_eigs: {'real', 'abs'} or False\n :param tikhonov_regularization: Tikhonov parameter for the regularization.\n If `None`, no regularization is applied, if `float`, it is used as the\n :math:`\\lambda` tikhonov parameter.\n :type tikhonov_regularization: int or float\n \"\"\"\n\n def __init__(\n self,\n svd_rank,\n exact,\n forward_backward,\n rescale_mode,\n sorted_eigs,\n tikhonov_regularization,\n ):\n self._exact = exact\n self._rescale_mode = rescale_mode\n self._svd_rank = svd_rank\n self._forward_backward = forward_backward\n self._sorted_eigs = sorted_eigs\n self._tikhonov_regularization = tikhonov_regularization\n self._norm_X = None\n\n def compute_operator(self, X, Y):\n \"\"\"\n Compute the low-rank operator.\n\n :param numpy.ndarray X: matrix containing the snapshots x0,..x{n-1} by\n column.\n :param numpy.ndarray Y: matrix containing the snapshots x1,..x{n} by\n column.\n :return: the (truncated) left-singular vectors matrix, the (truncated)\n singular values array, the (truncated) right-singular vectors\n matrix of X.\n :rtype: numpy.ndarray, numpy.ndarray, numpy.ndarray\n \"\"\"\n\n U, s, V = compute_svd(X, self._svd_rank)\n\n if self._tikhonov_regularization is not None:\n self._norm_X = np.linalg.norm(X)\n atilde = self._least_square_operator(U, s, V, Y)\n\n if self._forward_backward:\n # b stands for \"backward\"\n bU, bs, bV = compute_svd(Y, svd_rank=len(s))\n atilde_back = self._least_square_operator(bU, bs, bV, X)\n atilde = sqrtm(atilde.dot(np.linalg.inv(atilde_back)))\n if hasattr(np, \"complex256\") and atilde.dtype == np.complex256:\n atilde = atilde.astype(np.complex128)\n msg = \"Casting atilde from np.complex256 to np.complex128\"\n logging.info(msg)\n\n if self._rescale_mode == \"auto\":\n self._rescale_mode = s\n\n self._Atilde = atilde\n self._compute_eigenquantities()\n self._compute_modes(Y, U, s, V)\n\n return U, s, V\n\n @property\n def shape(self):\n \"\"\"Shape of the operator\"\"\"\n return self.as_numpy_array.shape\n\n def __call__(self, snapshot_lowrank_modal_coefficients):\n \"\"\"\n Apply the low-rank operator to a vector of the modal coefficients of a\n snapshot(s).\n\n :param numpy.ndarray snapshot_lowrank_modal_coefficients: low-rank\n representation (in modal coefficients) of a snapshot x{n}.\n :return: low-rank representation (in modal coefficients) of x{n+1}.\n :rtype: numpy.ndarray\n \"\"\"\n\n return self._Atilde.dot(snapshot_lowrank_modal_coefficients)\n\n @property\n def eigenvalues(self):\n if not hasattr(self, \"_eigenvalues\"):\n raise ValueError(\"You need to call fit before\")\n return self._eigenvalues\n\n @property\n def eigenvectors(self):\n if not hasattr(self, \"_eigenvectors\"):\n raise ValueError(\"You need to call fit before\")\n return self._eigenvectors\n\n @property\n def modes(self):\n if not hasattr(self, \"_modes\"):\n raise ValueError(\"You need to call fit before\")\n return self._modes\n\n @property\n def Lambda(self):\n if not hasattr(self, \"_Lambda\"):\n raise ValueError(\"You need to call fit before\")\n return self._Lambda\n\n @property\n def as_numpy_array(self):\n if not hasattr(self, \"_Atilde\") or self._Atilde is None:\n raise ValueError(\"You need to call fit before\")\n else:\n return self._Atilde\n\n def _least_square_operator(self, U, s, V, Y):\n \"\"\"\n Private method that computes the lowrank operator from the singular\n value decomposition of matrix X and the matrix Y.\n\n .. 
math::\n\n \\\\mathbf{\\\\tilde{A}} =\n \\\\mathbf{U}^* \\\\mathbf{Y} \\\\mathbf{X}^\\\\dagger \\\\mathbf{U} =\n \\\\mathbf{U}^* \\\\mathbf{Y} \\\\mathbf{V} \\\\mathbf{S}^{-1}\n\n :param numpy.ndarray U: 2D matrix that contains the left-singular\n vectors of X, stored by column.\n :param numpy.ndarray s: 1D array that contains the singular values of\n X.\n :param numpy.ndarray V: 2D matrix that contains the right-singular\n vectors of X, stored by row.\n :param numpy.ndarray Y: input matrix Y.\n :return: the lowrank operator\n :rtype: numpy.ndarray\n \"\"\"\n if self._tikhonov_regularization is not None:\n s = (\n s**2 + self._tikhonov_regularization * self._norm_X\n ) * np.reciprocal(s)\n return np.linalg.multi_dot([U.T.conj(), Y, V]) * np.reciprocal(s)\n\n def _compute_eigenquantities(self):\n \"\"\"\n Private method that computes eigenvalues and eigenvectors of the\n low-dimensional operator, scaled according to self._rescale_mode.\n \"\"\"\n\n if self._rescale_mode is None:\n # scaling isn't required\n Ahat = self._Atilde\n elif isinstance(self._rescale_mode, np.ndarray):\n if len(self._rescale_mode) != self.as_numpy_array.shape[0]:\n raise ValueError(\n \"\"\"Scaling by an invalid number of\n coefficients\"\"\"\n )\n scaling_factors_array = self._rescale_mode\n\n factors_inv_sqrt = np.diag(np.power(scaling_factors_array, -0.5))\n factors_sqrt = np.diag(np.power(scaling_factors_array, 0.5))\n\n # if an index is 0, we get inf when taking the reciprocal\n for idx, item in enumerate(scaling_factors_array):\n if item == 0:\n factors_inv_sqrt[idx] = 0\n\n Ahat = np.linalg.multi_dot(\n [factors_inv_sqrt, self.as_numpy_array, factors_sqrt]\n )\n else:\n raise ValueError(\n \"Invalid value for rescale_mode: {} of type {}\".format(\n self._rescale_mode, type(self._rescale_mode)\n )\n )\n\n self._eigenvalues, self._eigenvectors = np.linalg.eig(Ahat)\n\n if self._sorted_eigs is not False and self._sorted_eigs is not None:\n if self._sorted_eigs == \"abs\":\n\n def k(tp):\n return abs(tp[0])\n\n elif self._sorted_eigs == \"real\":\n\n def k(tp):\n eig = tp[0]\n if isinstance(eig, complex):\n return (eig.real, eig.imag)\n return (eig.real, 0)\n\n else:\n raise ValueError(\n \"Invalid value for sorted_eigs: {}\".format(\n self._sorted_eigs\n )\n )\n\n # each column is an eigenvector, therefore we take the\n # transpose to associate each row (former column) to an\n # eigenvalue before sorting\n a, b = zip(\n *sorted(zip(self._eigenvalues, self._eigenvectors.T), key=k)\n )\n self._eigenvalues = np.array([eig for eig in a])\n # we restore the original condition (eigenvectors in columns)\n self._eigenvectors = np.array([vec for vec in b]).T\n\n def _compute_modes(self, Y, U, Sigma, V):\n \"\"\"\n Private method that computes eigenvalues and eigenvectors of the\n high-dimensional operator (stored in self.modes and self.Lambda).\n\n :param numpy.ndarray Y: matrix containing the snapshots x1,..x{n} by\n column.\n :param numpy.ndarray U: (truncated) left singular vectors of X\n :param numpy.ndarray Sigma: (truncated) singular values of X\n :param numpy.ndarray V: (truncated) right singular vectors of X\n \"\"\"\n\n if self._rescale_mode is None:\n W = self.eigenvectors\n else:\n # compute W as shown in arXiv:1409.5496 (section 2.4)\n factors_sqrt = np.diag(np.power(self._rescale_mode, 0.5))\n W = factors_sqrt.dot(self.eigenvectors)\n\n # compute the eigenvectors of the high-dimensional operator\n if self._exact:\n if self._tikhonov_regularization is not None:\n Sigma = (\n Sigma**2 + 
self._tikhonov_regularization * self._norm_X\n ) * np.reciprocal(Sigma)\n high_dimensional_eigenvectors = (\n Y.dot(V) * np.reciprocal(Sigma)\n ).dot(W)\n else:\n high_dimensional_eigenvectors = U.dot(W)\n\n # eigenvalues are the same of lowrank\n high_dimensional_eigenvalues = self.eigenvalues\n\n self._modes = high_dimensional_eigenvectors\n self._Lambda = high_dimensional_eigenvalues" }, { "identifier": "compute_svd", "path": "build/lib/weidmd/utils.py", "snippet": "def compute_svd(X, svd_rank=0):\n \"\"\"\n Truncated Singular Value Decomposition.\n\n :param numpy.ndarray X: the matrix to decompose.\n :param svd_rank: the rank for the truncation; If 0, the method computes\n the optimal rank and uses it for truncation; if positive interger,\n the method uses the argument for the truncation; if float between 0\n and 1, the rank is the number of the biggest singular values that\n are needed to reach the 'energy' specified by `svd_rank`; if -1,\n the method does not compute truncation. Default is 0.\n :type svd_rank: int or float\n :return: the truncated left-singular vectors matrix, the truncated\n singular values array, the truncated right-singular vectors matrix.\n :rtype: numpy.ndarray, numpy.ndarray, numpy.ndarray\n\n References:\n Gavish, Matan, and David L. Donoho, The optimal hard threshold for\n singular values is, IEEE Transactions on Information Theory 60.8\n (2014): 5040-5053.\n \"\"\"\n U, s, V = np.linalg.svd(X, full_matrices=False)\n V = V.conj().T\n\n def omega(x):\n return 0.56 * x**3 - 0.95 * x**2 + 1.82 * x + 1.43\n\n if svd_rank == 0:\n beta = np.divide(*sorted(X.shape))\n tau = np.median(s) * omega(beta)\n rank = np.sum(s > tau)\n if rank == 0:\n warnings.warn(\n \"SVD optimal rank is 0. The largest singular values are \"\n \"indistinguishable from noise. Setting rank truncation to 1.\",\n RuntimeWarning,\n )\n rank = 1\n elif 0 < svd_rank < 1:\n cumulative_energy = np.cumsum(s**2 / (s**2).sum())\n rank = np.searchsorted(cumulative_energy, svd_rank) + 1\n elif svd_rank >= 1 and isinstance(svd_rank, int):\n rank = min(svd_rank, U.shape[1])\n else:\n rank = X.shape[1]\n\n U = U[:, :rank]\n V = V[:, :rank]\n s = s[:rank]\n\n return U, s, V" }, { "identifier": "compute_rank", "path": "build/lib/weidmd/rdmd.py", "snippet": "def compute_rank(X, svd_rank=0):\n \"\"\"\n Rank computation for the truncated Singular Value Decomposition.\n :param numpy.ndarray X: the matrix to decompose.\n :param svd_rank: the rank for the truncation; If 0, the method computes\n the optimal rank and uses it for truncation; if positive interger,\n the method uses the argument for the truncation; if float between 0\n and 1, the rank is the number of the biggest singular values that\n are needed to reach the 'energy' specified by `svd_rank`; if -1,\n the method does not compute truncation. Default is 0.\n :type svd_rank: int or float\n :return: the computed rank truncation.\n :rtype: int\n References:\n Gavish, Matan, and David L. 
Donoho, The optimal hard threshold for\n singular values is, IEEE Transactions on Information Theory 60.8\n (2014): 5040-5053.\n \"\"\"\n U, s, _ = np.linalg.svd(X, full_matrices=False)\n\n def omega(x):\n return 0.56 * x**3 - 0.95 * x**2 + 1.82 * x + 1.43\n\n if svd_rank == 0:\n beta = np.divide(*sorted(X.shape))\n tau = np.median(s) * omega(beta)\n rank = np.sum(s > tau)\n elif 0 < svd_rank < 1:\n cumulative_energy = np.cumsum(s**2 / (s**2).sum())\n rank = np.searchsorted(cumulative_energy, svd_rank) + 1\n elif svd_rank >= 1 and isinstance(svd_rank, int):\n rank = min(svd_rank, U.shape[1])\n else:\n rank = min(X.shape)\n\n return rank" }, { "identifier": "Snapshots", "path": "build/lib/weidmd/snapshots.py", "snippet": "class Snapshots:\n \"\"\"\n Utility class to preprocess snapshots shape for DMD.\n\n This class expects the time to be the last dimensions of the array.\n If a Python list is passed to the constructor, each element in the\n list is assumed to be a snapshot in time.\n\n Space dimensions are flattened (C-order) such that the\n matrix becomes 2D (time changes along the last axis).\n\n :param numpy.array | list(numpy.array) X: Training snapshots.\n \"\"\"\n\n def __init__(self, X):\n (\n self._snapshots,\n self._snapshots_shape,\n ) = Snapshots._unroll_space_dimensions(X)\n\n if self._snapshots.shape[-1] == 1:\n raise ValueError(\"Received only one time snapshot.\")\n\n Snapshots._check_condition_number(self._snapshots)\n\n logging.info(\n \"Snapshots: %s, snapshot shape: %s\",\n self._snapshots.shape,\n self._snapshots_shape,\n )\n\n @staticmethod\n def _unroll_space_dimensions(X):\n if hasattr(X, \"ndim\"):\n if X.ndim == 1:\n raise ValueError(\n \"Expected at least a 2D matrix (space x time).\"\n )\n snapshots = X.reshape((-1, X.shape[-1]))\n shapes = set((X.shape[:-1],))\n else:\n shapes, arrays = zip(\n *[(xarr.shape, xarr.flatten()) for xarr in map(np.asarray, X)]\n )\n\n shapes = set(shapes)\n if len(shapes) != 1:\n raise ValueError(\n f\"Snapshots must have the same size, found {len(shapes)}.\"\n )\n if len(next(iter(shapes))) == 0:\n raise ValueError(\"Expected at least a 2D matrix\")\n\n # move the time to the last axis\n snapshots = np.moveaxis(np.stack(arrays), 0, -1)\n\n return snapshots, shapes.pop()\n\n @staticmethod\n def _check_condition_number(X):\n cond_number = np.linalg.cond(X)\n if cond_number > 10e4:\n warnings.warn(\n f\"Input data condition number {cond_number}. \"\n \"\"\"Consider preprocessing data, passing in augmented data\nmatrix, or regularization methods.\"\"\"\n )\n\n @property\n def snapshots(self):\n \"\"\"\n Snapshots of the system (space flattened).\n \"\"\"\n return self._snapshots\n\n @property\n def snapshots_shape(self):\n \"\"\"\n Original (i.e. non-flattened) snapshot shape (time is ignored).\n \"\"\"\n return self._snapshots_shape" } ]
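The compute_svd and compute_rank snippets above both implement the Gavish-Donoho optimal hard threshold for singular values. A minimal standalone sketch of that rank-selection rule, using the same omega polynomial documented in the snippets; the synthetic test matrix is purely illustrative:

import numpy as np

def optimal_hard_threshold_rank(X: np.ndarray) -> int:
    # Gavish & Donoho (2014): keep singular values above omega(beta) * median(s).
    s = np.linalg.svd(X, compute_uv=False)
    beta = np.divide(*sorted(X.shape))  # aspect ratio, as in the snippets above
    omega = 0.56 * beta**3 - 0.95 * beta**2 + 1.82 * beta + 1.43
    return max(int(np.sum(s > omega * np.median(s))), 1)  # fall back to rank 1 like compute_svd

rng = np.random.default_rng(0)
X = rng.standard_normal((100, 3)) @ rng.standard_normal((3, 40))  # rank-3 signal
X += 1e-4 * rng.standard_normal((100, 40))  # small additive noise
print(optimal_hard_threshold_rank(X))  # expected to recover a rank of about 3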
import warnings import numpy as np from collections import OrderedDict from scipy.sparse import csr_matrix from scipy.linalg import qr from .dmdbase import DMDBase from .dmdoperator import DMDOperator from .utils import compute_svd from .rdmd import compute_rank from .snapshots import Snapshots
12,638
@property def trial_size(self): """ :return: size of the data subsets used during each BOP-DMD trial. :rtype: int or float """ return self._trial_size @property def time(self): """ Get the vector that contains the time points of the fitted snapshots. :return: the vector that contains the original time points. :rtype: numpy.ndarray """ if self._time is None: raise RuntimeError("fit() hasn't been called.") return self._time @property def atilde(self): """ Get the reduced Koopman operator A, called Atilde. :return: the reduced Koopman operator A. :rtype: numpy.ndarray """ return self.operator.as_numpy_array @property def A(self): """ Get the full Koopman operator A. :return: the full Koopman operator A. :rtype: numpy.ndarray """ return self.operator.A @property def dynamics(self): """ Get the time evolution of each mode. :return: matrix that contains all the time evolution, stored by row. :rtype: numpy.ndarray """ t_omega = np.exp(np.outer(self.eigs, self._time)) return np.diag(self.amplitudes).dot(t_omega) def print_varpro_opts(self): """ Prints a formatted information string that displays all chosen variable projection parameter values. """ if self._Atilde is None: raise ValueError("You need to call fit before") opt_names = [ "init_lambda", "maxlam", "lamup", "use_levmarq", "maxiter", "tol", "eps_stall", "use_fulljac", "verbose", ] print("VARIABLE PROJECTION OPTIONS:") print("============================") for name, value in zip(opt_names, self.operator.varpro_opts): if len(name) < 7: print(name + ":\t\t" + str(value)) else: print(name + ":\t" + str(value)) def _initialize_alpha(self): """ Uses projected trapezoidal rule to approximate the eigenvalues of A in z' = Az. The computed eigenvalues will serve as our initial guess for alpha. :return: Approximated eigenvalues of the matrix A. :rtype: numpy.ndarray """ # Project the snapshot data onto the projection basis. ux = self._proj_basis.conj().T.dot(self.snapshots) ux1 = ux[:, :-1] ux2 = ux[:, 1:] # Define the diagonal matrix T as the following. t1 = self._time[:-1] t2 = self._time[1:] T = np.diag(t2 - t1) # Define the matrices Y and Z as the following and compute the # rank-truncated SVD of Y. Y = (ux1 + ux2) / 2 Z = (ux2 - ux1).dot(np.linalg.inv(T)) U, s, V = compute_svd(Y, self._svd_rank) S = np.diag(s) # Compute the matrix Atilde and return its eigenvalues. Atilde = np.linalg.multi_dot([U.conj().T, Z, V, np.linalg.inv(S)]) return np.linalg.eig(Atilde)[0] def fit(self, X, t): """ Compute the Optimized Dynamic Mode Decomposition. :param X: the input snapshots. :type X: numpy.ndarray or iterable :param t: the input time vector. :type t: numpy.ndarray or iterable """ # Process the input data and convert to numpy.ndarrays. self._reset()
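The _initialize_alpha method above seeds the variable projection with a projected trapezoidal rule: for z' = Az it matches (x_{k+1} - x_k) / dt against A (x_{k+1} + x_k) / 2 and takes the eigenvalues of the resulting operator. A minimal numerical sketch of the same idea on synthetic data, skipping the POD projection and rank-truncated SVD that the method applies:

import numpy as np
from scipy.linalg import block_diag, expm

A_true = block_diag(np.array([[-0.5, 3.0], [-3.0, -0.5]]), np.array([[-1.0]]))  # eigs -0.5 +/- 3j and -1
t = np.linspace(0.0, 4.0, 200)
x0 = np.array([1.0, 0.0, 1.0])
X = np.column_stack([expm(A_true * tk) @ x0 for tk in t])  # snapshots of z' = A z

X1, X2 = X[:, :-1], X[:, 1:]
T = np.diag(t[1:] - t[:-1])
Y = (X1 + X2) / 2                  # trapezoidal average, as in _initialize_alpha
Z = (X2 - X1) @ np.linalg.inv(T)   # finite-difference derivative
A_est = Z @ np.linalg.pinv(Y)      # least-squares solve of Z ~ A_est Y
print(np.sort_complex(np.linalg.eigvals(A_est)))  # close to the continuous-time eigenvalues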
class BOPDMDOperator(DMDOperator): """ BOP-DMD operator. :param compute_A: Flag that determines whether or not to compute the full Koopman operator A. :type compute_A: bool :param use_proj: Flag that determines the type of computation to perform. If True, fit input data projected onto the first svd_rank POD modes or columns of proj_basis if provided. If False, fit the full input data. :type use_proj: bool :param init_alpha: Initial guess for the continuous-time DMD eigenvalues. :type init_alpha: numpy.ndarray :param proj_basis: Orthogonal basis for projection, where each column of proj_basis contains a basis mode. :type proj_basis: numpy.ndarray :param num_trials: Number of BOP-DMD trials to perform. If num_trials is a positive integer, num_trials BOP-DMD trials are performed. Otherwise, standard optimized dmd is performed. :type num_trials: int :param trial_size: Size of the randomly selected subset of observations to use for each trial of bagged optimized dmd (BOP-DMD). If trial_size is a positive integer, trial_size many observations will be used per trial. If trial_size is a float between 0 and 1, int(trial_size * m) many observations will be used per trial, where m denotes the total number of data points observed. Note that any other type of input for trial_size will yield an error. :type trial_size: int or float :param eig_sort: Method used to sort eigenvalues (and modes accordingly) when performing BOP-DMD. Eigenvalues will be sorted by real part and then by imaginary part to break ties if `eig_sort="real"`, by imaginary part and then by real part to break ties if `eig_sort="imag"`, or by magnitude if `eig_sort="abs"`. If `eig_sort="auto"`, one of the previously-mentioned sorting methods is chosen depending on eigenvalue variance. :type eig_sort: {"real", "imag", "abs", "auto"} :param init_lambda: Initial value used for the regularization parameter in the Levenberg method. Default is 1.0. Note: Larger lambda values make the method more like gradient descent. :type init_lambda: float :param maxlam: Maximum number of of steps used in the inner Levenberg loop, i.e. the number of times you increase lambda before quitting. Default is 52. :type maxlam: int :param lamup: The factor by which you increase lambda when searching for an appropriate step. Default is 2.0. :type lamup: float :param use_levmarq: Flag that determines whether you use the Levenberg algorithm or the Levenberg-Marquardt algorithm. Default is True, use Levenberg-Marquardt. :type use_levmarq: bool :param maxiter: The maximum number of outer loop iterations to use before quitting. Default is 30. :type maxiter: int :param tol: The tolerance for the relative error in the residual. i.e. the program will terminate if norm(y-Phi(alpha)*b,'fro')/norm(y,'fro') < tol is achieved. Default is 1e-6. :type tol: float :param eps_stall: The tolerance for detecting a stall. i.e. if error(iter-1)-error(iter) < eps_stall*err(iter-1) the program halts. Default is 1e-12. :type eps_stall: float :param use_fulljac: Flag that determines whether or not to use the full expression for the Jacobian or Kaufman's approximation. Default is True, use full expression. :type use_fulljac: bool :param verbose: Flag that determines whether or not to print warning messages that arise during the variable projection routine, and whether or not to print information regarding the method's iterative progress. Default is False, don't print information. 
:type verbose: bool """ def __init__( self, compute_A, use_proj, init_alpha, proj_basis, num_trials, trial_size, eig_sort, init_lambda=1.0, maxlam=52, lamup=2.0, use_levmarq=True, maxiter=30, tol=1e-6, eps_stall=1e-12, use_fulljac=True, verbose=False, ): self._compute_A = compute_A self._use_proj = use_proj self._init_alpha = init_alpha self._proj_basis = proj_basis self._num_trials = num_trials self._trial_size = trial_size self._eig_sort = eig_sort self._varpro_opts = ( init_lambda, maxlam, lamup, use_levmarq, maxiter, tol, eps_stall, use_fulljac, verbose, ) self._varpro_opts_warn() self._modes = None self._eigenvalues = None self._eigenvalues_std = None self._amplitudes_std = None self._Atilde = None self._A = None @property def varpro_opts(self): """ Get the variable projection options. :return: the variable projection options. :rtype: tuple """ return self._varpro_opts @property def A(self): """ Get the full Koopman operator A. :return: the full Koopman operator A. :rtype: numpy.ndarray """ if not self._compute_A: msg = ( "A not computed during fit. " "Set parameter compute_A = True to compute A." ) raise ValueError(msg) if self._A is None: raise ValueError("You need to call fit before") return self._A @property def amplitudes_std(self): """ Get the amplitudes standard deviation. :return: amplitudes standard deviation. :rtype: numpy.ndarray """ return self._amplitudes_std @property def eigenvalues_std(self): """ Get the eigenvalues standard deviation. :return: eigenvalues standard deviation. :rtype: numpy.ndarray """ return self._eigenvalues_std def _varpro_opts_warn(self): """ Checks the validity of the parameter values in _varpro_opts. Throws an error if any parameter value has an invalid type and generates a warning if any value lies outside of the recommended range. """ # Generate dictionary of recommended value range for each parameter. rec_ranges = OrderedDict() rec_ranges["init_lambda"] = [0.0, 1e16] rec_ranges["maxlam"] = [0, 200] rec_ranges["lamup"] = [1.0, 1e16] rec_ranges["use_levmarq"] = [-np.inf, np.inf] rec_ranges["maxiter"] = [0, 1e12] rec_ranges["tol"] = [0.0, 1e16] rec_ranges["eps_stall"] = [-np.inf, 1.0] rec_ranges["use_fulljac"] = [-np.inf, np.inf] rec_ranges["verbose"] = [-np.inf, np.inf] for opt_value, (opt_name, (opt_min, opt_max)) in zip( self._varpro_opts, rec_ranges.items() ): if not isinstance(opt_value, (int, float, bool)): raise ValueError("Invalid variable projection option given.") if opt_value < opt_min: msg = ( "Option {} with value {} is less than {}, " "which is not recommended." ) warnings.warn(msg.format(opt_name, opt_value, opt_min)) elif opt_value > opt_max: msg = ( "Option {} with value {} is greater than {}, " "which is not recommended." ) warnings.warn(msg.format(opt_name, opt_value, opt_max)) def _exp_function(self, alpha, t): """ Matrix of exponentials. :param alpha: Vector of time scalings in the exponent. :type alpha: numpy.ndarray :param t: Vector of time values. :type t: numpy.ndarray :return: Matrix A such that A[i, j] = exp(t_i * alpha_j). :rtype: numpy.ndarray """ return np.exp(np.outer(t, alpha)) def _exp_function_deriv(self, alpha, t, i): """ Derivatives of the matrix of exponentials. :param alpha: Vector of time scalings in the exponent. :type alpha: numpy.ndarray :param t: Vector of time values. :type t: numpy.ndarray :param i: Index in alpha of the derivative variable. :type i: int :return: Derivatives of Phi(alpha, t) with respect to alpha[i]. 
:rtype: scipy.sparse.csr_matrix """ m = len(t) n = len(alpha) if i < 0 or i > n - 1: raise ValueError("Invalid index i given to exp_function_deriv.") A = np.multiply(t, np.exp(alpha[i] * t)) return csr_matrix( (A, (np.arange(m), np.full(m, fill_value=i))), shape=(m, n) ) def _compute_irank_svd(self, X, tolrank): """ Helper function that computes and returns the SVD of X with a rank truncation of irank, which denotes the number of singular values of X greater than tolrank * s1, where s1 is the largest singular value of the matrix X. :param X: Matrix to decompose. :type X: numpy.ndarray :param tolrank: Determines the rank of the returned SVD. :type tolrank: float :return: irank truncated SVD of X. :rtype: numpy.ndarray, numpy.ndarray, numpy.ndarray """ U, s, Vh = np.linalg.svd(X, full_matrices=False) irank = np.sum(s > tolrank * s[0]) U = U[:, :irank] S = np.diag(s[:irank]) Vh = Vh[:irank] return U, S, Vh def _bag(self, H, trial_size): """ Given a 2D array of data X, where each row contains a data snapshot, randomly sub-selects and returns data snapshots while preserving the original snapshot order. Note that if trial_size is a positive integer, trial_size many observations will be used per trial. If trial_size is a float between 0 and 1, int(trial_size * m) many observations will be used per trial, where m denotes the total number of snapshots in X. The indices of the sub-selected snapshots are also returned. :param H: Full data matrix to be sub-selected from. :type H: numpy.ndarray :param trial_size: Size of the sub-selection from H. :type trial_size: int or float :return: Matrix of sub-selected data snapshots, stored in each row, and a vector of each snapshots's row index location in H. :rtype: numpy.ndarray, numpy.ndarray """ # Ensure that H is a 2D numpy.ndarray. if not isinstance(H, np.ndarray) or H.ndim != 2: msg = "H must be a 2D np.ndarray." raise ValueError(msg) if 0 < trial_size < 1: batch_size = int(trial_size * H.shape[0]) elif trial_size >= 1 and isinstance(trial_size, int): batch_size = trial_size else: msg = ( "Invalid trial_size parameter. trial_size must be either " "a positive integer or a float between 0 and 1." ) raise ValueError(msg) # Throw an error if the batch size is too large or too small. if batch_size > H.shape[0]: msg = ( "Error bagging the input data. Please ensure that the " "trial_size parameter is small enough for bagging." ) raise ValueError(msg) if batch_size == 0: msg = ( "Error bagging the input data. Please ensure that the " "trial_size parameter is large enough for bagging." ) raise ValueError(msg) # Obtain and return subset of the data. all_inds = np.arange(H.shape[0]) subset_inds = np.sort( np.random.choice(all_inds, size=batch_size, replace=False) ) return H[subset_inds], subset_inds def _variable_projection(self, H, t, init_alpha, Phi, dPhi): """ Variable projection routine for multivariate data. Attempts to fit the columns of H as linear combinations of the columns of Phi(alpha,t) such that H = Phi(alpha,t)B. Note that M denotes the number of data samples, N denotes the number of columns of Phi, IS denotes the number of functions to fit, and IA denotes the length of the alpha vector. :param H: (M, IS) matrix of data. :type H: numpy.ndarray :param t: (M,) vector of sample times. :type t: numpy.ndarray :param init_alpha: initial guess for alpha. :type init_alpha: numpy.ndarray :param Phi: (M, N) matrix-valued function Phi(alpha,t). 
:type Phi: function :param dPhi: (M, N) matrix-valued function dPhi(alpha,t,i) that contains the derivatives of Phi wrt the ith component of alpha. :type dPhi: function :return: Tuple of two numpy arrays representing... 1. (N, IS) best-fit matrix B. 2. (N,) best-fit vector alpha. :rtype: Tuple[numpy.ndarray, numpy.ndarray] References: - Extensions and Uses of the Variable Projection Algorith for Solving Nonlinear Least Squares Problems by G. H. Golub and R. J. LeVeque ARO Report 79-3, Proceedings of the 1979 Army Numerical Analsysis and Computers Conference. - Variable projection for nonlinear least squares problems. Computational Optimization and Applications 54.3 (2013): 579-593 by Dianne P. O'Leary and Bert W. Rust. """ def compute_residual(alpha): """ Helper function that, given alpha, and using H, t, Phi as they are passed to the _variable_projection function, computes and returns the matrix Phi(alpha,t), B from the expression H = Phi(alpha,t)B, the residual H - Phi(alpha,t)B, and 0.5*norm(residual,'fro')^2, which will be used to denote the error. """ Phi_matrix = Phi(alpha, t) B = np.linalg.lstsq(Phi_matrix, H, rcond=None)[0] residual = H - Phi_matrix.dot(B) error = 0.5 * np.linalg.norm(residual, "fro") ** 2 return B, residual, error # Define M, IS, and IA. M, IS = H.shape IA = len(init_alpha) # Unpack all variable projection parameters stored in varpro_opts. ( init_lambda, maxlam, lamup, use_levmarq, maxiter, tol, eps_stall, use_fulljac, verbose, ) = self._varpro_opts # Initialize values. tolrank = M * np.finfo(float).eps _lambda = init_lambda alpha = np.copy(init_alpha) B, residual, error = compute_residual(alpha) U, S, Vh = self._compute_irank_svd(Phi(alpha, t), tolrank) # Initialize storage. all_error = np.zeros(maxiter) djac_matrix = np.zeros((M * IS, IA), dtype="complex") rjac = np.zeros((2 * IA, IA), dtype="complex") scales = np.zeros(IA) for itr in range(maxiter): # Build Jacobian matrix, looping over alpha indices. for i in range(IA): # Build the approximate expression for the Jacobian. dphi_temp = dPhi(alpha, t, i) ut_dphi = csr_matrix(U.conj().T @ dphi_temp) uut_dphi = csr_matrix(U @ ut_dphi) djac_a = (dphi_temp - uut_dphi) @ B djac_matrix[:, i] = djac_a.ravel(order="F") # Compute the full expression for the Jacobian. if use_fulljac: transform = np.linalg.multi_dot([U, np.linalg.inv(S), Vh]) dphit_res = csr_matrix(dphi_temp.conj().T @ residual) djac_b = transform @ dphit_res djac_matrix[:, i] += djac_b.ravel(order="F") # Scale for the Levenberg algorithm. scales[i] = 1 # Scale for the Levenberg-Marquardt algorithm. if use_levmarq: scales[i] = min(np.linalg.norm(djac_matrix[:, i]), 1) scales[i] = max(scales[i], 1e-6) # Loop to determine lambda (the step-size parameter). rhs_temp = np.copy(residual.ravel(order="F"))[:, None] q_out, djac_out, j_pvt = qr( djac_matrix, mode="economic", pivoting=True ) ij_pvt = np.arange(IA) ij_pvt = ij_pvt[j_pvt] rjac[:IA] = np.triu(djac_out[:IA]) rhs_top = q_out.conj().T.dot(rhs_temp) scales_pvt = scales[j_pvt[:IA]] rhs = np.concatenate( (rhs_top[:IA], np.zeros(IA, dtype="complex")), axis=None ) def step(_lambda, rhs, scales_pvt, ij_pvt): """ Helper function that, given a step size _lambda and the current right-hand side and pivots, computes and returns delta, the amount in which we update alpha, and the updated alpha vector. Note that this function uses rjac and alpha as they are defined outside of this function. """ # Compute the step delta. 
rjac[IA:] = _lambda * np.diag(scales_pvt) delta = np.linalg.lstsq(rjac, rhs, rcond=None)[0] delta = delta[ij_pvt] # Compute the updated alpha vector. alpha_updated = alpha.ravel() + delta.ravel() return delta, alpha_updated # Take a step using our initial step size init_lambda. delta_0, alpha_0 = step(_lambda, rhs, scales_pvt, ij_pvt) B_0, residual_0, error_0 = compute_residual(alpha_0) # Check actual improvement vs predicted improvement. actual_improvement = error - error_0 pred_improvement = ( 0.5 * np.linalg.multi_dot( [delta_0.conj().T, djac_matrix.conj().T, rhs_temp] ).real ) improvement_ratio = actual_improvement / pred_improvement if error_0 < error: # Rescale lambda based on the improvement ratio. _lambda *= max(1 / 3, 1 - (2 * improvement_ratio - 1) ** 3) alpha, B, residual, error = alpha_0, B_0, residual_0, error_0 else: # Increase lambda until something works. for _ in range(maxlam): _lambda *= lamup delta_0, alpha_0 = step(_lambda, rhs, scales_pvt, ij_pvt) B_0, residual_0, error_0 = compute_residual(alpha_0) if error_0 < error: alpha, B = alpha_0, B_0 residual, error = residual_0, error_0 break # Terminate if no appropriate step length was found. if error_0 >= error: if verbose: msg = ( "Failed to find appropriate step length at " "iteration {}. Current error {}." ) warnings.warn(msg.format(itr, error)) return B, alpha # Record the current error. all_error[itr] = error # Print iterative progress if the verbose flag is turned on. if verbose: update_msg = "Step {} Error {} Lambda {}" print(update_msg.format(itr, error, _lambda)) # Terminate if the tolerance is met. if error < tol: return B, alpha # Terminate if a stall is detected. if ( itr > 0 and all_error[itr - 1] - all_error[itr] < eps_stall * all_error[itr - 1] ): if verbose: msg = ( "Stall detected: error reduced by less than {} " "times the error at the previous step. " "Iteration {}. Current error {}." ) warnings.warn(msg.format(eps_stall, itr, error)) return B, alpha U, S, Vh = self._compute_irank_svd(Phi(alpha, t), tolrank) # Failed to meet tolerance in maxiter steps. if verbose: msg = ( "Failed to reach tolerance after maxiter = {} iterations. " "Current error {}." ) warnings.warn(msg.format(maxiter, error)) return B, alpha def _single_trial_compute_operator(self, H, t, init_alpha): """ Helper function that computes the standard optimized dmd operator. Returns the resulting DMD modes, eigenvalues, amplitudes, reduced system matrix, and full system matrix respectively. """ B, alpha = self._variable_projection( H, t, init_alpha, self._exp_function, self._exp_function_deriv ) # Save the modes, eigenvalues, and amplitudes respectively. w = B.T e = alpha b = np.sqrt(np.sum(np.abs(w) ** 2, axis=0)) # Normalize the modes and the amplitudes. inds_small = np.abs(b) < (10 * np.finfo(float).eps * np.max(b)) b[inds_small] = 1.0 w = w.dot(np.diag(1 / b)) w[:, inds_small] = 0.0 b[inds_small] = 0.0 # Compute the projected propagator Atilde. if self._use_proj: Atilde = np.linalg.multi_dot([w, np.diag(e), np.linalg.pinv(w)]) # Unproject the dmd modes. w = self._proj_basis.dot(w) else: w_proj = self._proj_basis.conj().T.dot(w) Atilde = np.linalg.multi_dot( [w_proj, np.diag(e), np.linalg.pinv(w_proj)] ) # Compute the full system matrix A. if self._compute_A: A = np.linalg.multi_dot([w, np.diag(e), np.linalg.pinv(w)]) else: A = None return w, e, b, Atilde, A def compute_operator(self, H, t): """ Compute the low-rank and the full BOP-DMD operators. :param H: Matrix of data to fit. :type H: numpy.ndarray :param t: Vector of sample times. 
:type t: numpy.ndarray :return: The BOP-DMD amplitudes. :rtype: numpy.ndarray """ # Perform an initial optimized dmd solve using init_alpha. w_0, e_0, b_0, Atilde_0, A_0 = self._single_trial_compute_operator( H, t, self._init_alpha ) # If num_trials isn't a positive int, perform standard optimized dmd. if self._num_trials <= 0 or not isinstance(self._num_trials, int): self._modes = w_0 self._eigenvalues = e_0 self._Atilde = Atilde_0 self._A = A_0 return b_0 # Perform BOP-DMD. # Initialize bagging result storage. all_w = np.empty((self._num_trials, *w_0.shape), dtype="complex") all_e = np.empty((self._num_trials, *e_0.shape), dtype="complex") all_b = np.empty((self._num_trials, *b_0.shape), dtype="complex") # Perform num_trials many trials of optimized dmd. for i in range(self._num_trials): H_i, subset_inds = self._bag(H, self._trial_size) w_i, e_i, b_i, _, _ = self._single_trial_compute_operator( H_i, t[subset_inds], e_0 ) # Set the sorting style if _eig_sort is "auto". if self._eig_sort == "auto": real_var = np.var(e_i.real) imag_var = np.var(e_i.imag) abs_var = np.var(np.abs(e_i)) all_var = [real_var, imag_var, abs_var] if np.argmax(all_var) == 0: self._eig_sort = "real" elif np.argmax(all_var) == 1: self._eig_sort = "imag" else: self._eig_sort = "abs" # Sort the results according to eigenvalue. if self._eig_sort == "real": sorted_inds = np.argsort(e_i) elif self._eig_sort == "imag": e_i_real_imag_swapped = e_i.imag + (1j * e_i.real) sorted_inds = np.argsort(e_i_real_imag_swapped) elif self._eig_sort == "abs": sorted_inds = np.argsort(np.abs(e_i)) else: raise ValueError("Provided eig_sort method is not supported.") all_w[i] = w_i[:, sorted_inds] all_e[i] = e_i[sorted_inds] all_b[i] = b_i[sorted_inds] # Compute and use the average optimized dmd results. self._modes = np.mean(all_w, axis=0) self._eigenvalues = np.mean(all_e, axis=0) # Compute Atilde using the average optimized dmd results. w_proj = self._proj_basis.conj().T.dot(self._modes) self._Atilde = np.linalg.multi_dot( [w_proj, np.diag(self._eigenvalues), np.linalg.pinv(w_proj)] ) # Compute A if requested. if self._compute_A: self._A = np.linalg.multi_dot( [ self._modes, np.diag(self._eigenvalues), np.linalg.pinv(self._modes), ] ) # Compute and save the standard deviation of the optimized dmd results. self._eigenvalues_std = np.std(all_e, axis=0) self._amplitudes_std = np.std(all_b, axis=0) return np.mean(all_b, axis=0) class BOPDMD(DMDBase): """ Bagging, Optimized Dynamic Mode Decomposition. :param svd_rank: The rank for the truncation; If 0, the method computes the optimal rank and uses it for truncation; if positive integer, the method uses the argument for the truncation; if float between 0 and 1, the rank is the number of the biggest singular values that are needed to reach the 'energy' specified by `svd_rank`; if -1, the method does not compute truncation. :type svd_rank: int or float :param compute_A: Flag that determines whether or not to compute the full Koopman operator A. Default is False, do not compute the full operator. Note that the full operator is potentially prohibitively expensive to compute. :type compute_A: bool :param use_proj: Flag that determines the type of computation to perform. If True, fit input data projected onto the first svd_rank POD modes or columns of proj_basis if provided. If False, fit the full input data. Default is True, fit projected data. :type use_proj: bool :param init_alpha: Initial guess for the continuous-time DMD eigenvalues. 
If not provided, one is computed via a trapezoidal rule approximation. Default is None (alpha not provided). :type init_alpha: numpy.ndarray :param proj_basis: Orthogonal basis for projection, where each column of proj_basis contains a basis mode. If not provided, POD modes are used. Default is None (basis not provided). :type proj_basis: numpy.ndarray :param num_trials: Number of BOP-DMD trials to perform. If num_trials is a positive integer, num_trials BOP-DMD trials are performed. Otherwise, standard optimized dmd is performed. Default is 0. :type num_trials: int :param trial_size: Size of the randomly selected subset of observations to use for each trial of bagged optimized dmd (BOP-DMD). If trial_size is a positive integer, trial_size many observations will be used per trial. If trial_size is a float between 0 and 1, int(trial_size * m) many observations will be used per trial, where m denotes the total number of data points observed. Note that any other type of input for trial_size will yield an error. Default is 0.2. :type trial_size: int or float :param eig_sort: Method used to sort eigenvalues (and modes accordingly) when performing BOP-DMD. Eigenvalues will be sorted by real part and then by imaginary part to break ties if `eig_sort="real"`, by imaginary part and then by real part to break ties if `eig_sort="imag"`, or by magnitude if `eig_sort="abs"`. If `eig_sort="auto"`, one of the previously-mentioned sorting methods is chosen depending on eigenvalue variance. Default is "auto". :type eig_sort: {"real", "imag", "abs", "auto"} :param varpro_opts_dict: Dictionary containing the desired parameter values for variable projection. The following parameters may be specified: `init_lambda`, `maxlam`, `lamup`, `use_levmarq`, `maxiter`, `tol`, `eps_stall`, `use_fulljac`, `verbose`. Default values will be used for any parameters not specified in `varpro_opts_dict`. See `BOPDMDOperator` documentation for default values and descriptions for each parameter. :type varpro_opts_dict: dict """ def __init__( self, svd_rank=0, compute_A=False, use_proj=True, init_alpha=None, proj_basis=None, num_trials=0, trial_size=0.2, eig_sort="auto", varpro_opts_dict=None, ): self._svd_rank = svd_rank self._compute_A = compute_A self._use_proj = use_proj self._init_alpha = init_alpha self._proj_basis = proj_basis self._num_trials = num_trials self._trial_size = trial_size self._eig_sort = eig_sort if varpro_opts_dict is None: self._varpro_opts_dict = {} elif not isinstance(varpro_opts_dict, dict): raise ValueError("varpro_opts_dict must be a dict.") else: self._varpro_opts_dict = varpro_opts_dict self._snapshots_holder = None self._time = None self._Atilde = None self._modes_activation_bitmask_proxy = None @property def svd_rank(self): """ :return: the rank used for the svd truncation. :rtype: int or float """ return self._svd_rank @property def compute_A(self): """ :return: flag that determines whether to compute the full operator A. :rtype: bool """ return self._compute_A @property def use_proj(self): """ :return: flag that determines whether to fit projected or full data. :rtype: bool """ return self._use_proj @property def init_alpha(self): """ :return: initial guess used for the continuous-time DMD eigenvalues. :rtype: numpy.ndarray """ if self._init_alpha is None: msg = ( "fit() hasn't been called " "and no initial value for alpha has been given." ) raise RuntimeError(msg) return self._init_alpha @property def proj_basis(self): """ :return: the projection basis used, with modes stored by column. 
:rtype: numpy.ndarray """ if self._proj_basis is None: msg = ( "fit() hasn't been called " "and no projection basis has been given." ) raise RuntimeError(msg) return self._proj_basis @property def num_trials(self): """ :return: the number of BOP-DMD trials to perform. :rtype: int """ return self._num_trials @property def trial_size(self): """ :return: size of the data subsets used during each BOP-DMD trial. :rtype: int or float """ return self._trial_size @property def time(self): """ Get the vector that contains the time points of the fitted snapshots. :return: the vector that contains the original time points. :rtype: numpy.ndarray """ if self._time is None: raise RuntimeError("fit() hasn't been called.") return self._time @property def atilde(self): """ Get the reduced Koopman operator A, called Atilde. :return: the reduced Koopman operator A. :rtype: numpy.ndarray """ return self.operator.as_numpy_array @property def A(self): """ Get the full Koopman operator A. :return: the full Koopman operator A. :rtype: numpy.ndarray """ return self.operator.A @property def dynamics(self): """ Get the time evolution of each mode. :return: matrix that contains all the time evolution, stored by row. :rtype: numpy.ndarray """ t_omega = np.exp(np.outer(self.eigs, self._time)) return np.diag(self.amplitudes).dot(t_omega) def print_varpro_opts(self): """ Prints a formatted information string that displays all chosen variable projection parameter values. """ if self._Atilde is None: raise ValueError("You need to call fit before") opt_names = [ "init_lambda", "maxlam", "lamup", "use_levmarq", "maxiter", "tol", "eps_stall", "use_fulljac", "verbose", ] print("VARIABLE PROJECTION OPTIONS:") print("============================") for name, value in zip(opt_names, self.operator.varpro_opts): if len(name) < 7: print(name + ":\t\t" + str(value)) else: print(name + ":\t" + str(value)) def _initialize_alpha(self): """ Uses projected trapezoidal rule to approximate the eigenvalues of A in z' = Az. The computed eigenvalues will serve as our initial guess for alpha. :return: Approximated eigenvalues of the matrix A. :rtype: numpy.ndarray """ # Project the snapshot data onto the projection basis. ux = self._proj_basis.conj().T.dot(self.snapshots) ux1 = ux[:, :-1] ux2 = ux[:, 1:] # Define the diagonal matrix T as the following. t1 = self._time[:-1] t2 = self._time[1:] T = np.diag(t2 - t1) # Define the matrices Y and Z as the following and compute the # rank-truncated SVD of Y. Y = (ux1 + ux2) / 2 Z = (ux2 - ux1).dot(np.linalg.inv(T)) U, s, V = compute_svd(Y, self._svd_rank) S = np.diag(s) # Compute the matrix Atilde and return its eigenvalues. Atilde = np.linalg.multi_dot([U.conj().T, Z, V, np.linalg.inv(S)]) return np.linalg.eig(Atilde)[0] def fit(self, X, t): """ Compute the Optimized Dynamic Mode Decomposition. :param X: the input snapshots. :type X: numpy.ndarray or iterable :param t: the input time vector. :type t: numpy.ndarray or iterable """ # Process the input data and convert to numpy.ndarrays. self._reset()
self._snapshots_holder = Snapshots(X)
4
2023-10-30 12:37:40+00:00
16k
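The `_variable_projection` routine in the BOP-DMD code above alternates a closed-form least-squares solve for B with a Levenberg-Marquardt update of alpha, fitting H ≈ Phi(alpha, t) B where Phi(alpha, t)[i, j] = exp(t_i * alpha_j). Below is a minimal NumPy sketch of the linear half of that alternation (what the `compute_residual` helper does); the sample times, modes, and amplitudes are made up for illustration and are not taken from the repository.

import numpy as np

# Sketch of the inner least-squares step of variable projection:
# build Phi(alpha, t)[i, j] = exp(t_i * alpha_j) and fit H ~= Phi @ B.
def residual_step(alpha, t, H):
    Phi = np.exp(np.outer(t, alpha))                    # (M, N) matrix of exponentials
    B = np.linalg.lstsq(Phi, H, rcond=None)[0]          # (N, IS) linear coefficients
    residual = H - Phi @ B
    error = 0.5 * np.linalg.norm(residual, "fro") ** 2  # objective minimized over alpha
    return B, residual, error

# Illustrative data: two damped oscillating modes sampled at 50 time points.
t = np.linspace(0.0, 1.0, 50)
true_alpha = np.array([-1.0 + 2j * np.pi, -0.5 - 1j * np.pi])
H = np.exp(np.outer(t, true_alpha)) @ np.array([[1.0], [0.5]])

B, residual, error = residual_step(true_alpha, t, H)
print(error)  # ~0 because alpha matches the data exactly

The nonlinear half of the algorithm then perturbs alpha along the Jacobian of this error, which is what the `step` helper and the lambda-search loop in the snippet above implement.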
lewandofskee/DiAD
ldm/models/diffusion/.ipynb_checkpoints/ddpm-checkpoint.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates\n else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def reset_num_updates(self):\n del self.num_updates\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 
1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ema_decay=None,\n learn_logvar=False\n ):\n super().__init__()\n self.learn_logvar = learn_logvar\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n self.use_ema = ema_decay is not None\n if self.use_ema:\n self.ema_decay = ema_decay\n assert 0. 
< ema_decay < 1.\n self.model_ema = LitEma(self, decay=ema_decay)\n print(f\"Keeping EMAs of {len(list(self.model_ema.buffers()))}.\")\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n @contextmanager\n def ema_scope(self, context=None):\n if self.use_ema:\n self.model_ema.store(self.parameters())\n self.model_ema.copy_to(self)\n if context is not None:\n print(f\"{context}: Switched to EMA weights\")\n try:\n yield None\n finally:\n if self.use_ema:\n self.model_ema.restore(self.parameters())\n if context is not None:\n print(f\"{context}: Restored training weights\")\n\n def on_train_batch_end(self, *args, **kwargs):\n if self.use_ema:\n self.model_ema(self)\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n # x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n log_dict = self._validation_step(batch, batch_idx)\n with self.ema_scope():\n log_dict_ema = self._validation_step(batch, batch_idx, postfix=\"_ema\")\n return log_dict\n\n def _validation_step(self, batch, batch_idx, postfix=\"\"):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n self.log(f\"val{postfix}/rec_loss\", log_dict_ae[f\"val{postfix}/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def 
configure_optimizers(self):\n lr = self.learning_rate\n ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(\n self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())\n if self.learn_logvar:\n print(f\"{self.__class__.__name__}: Learning logvar\")\n ae_params_list.append(self.loss.logvar)\n opt_ae = torch.optim.Adam(ae_params_list,\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n if log_ema or self.use_ema:\n with self.ema_scope():\n xrec_ema, posterior_ema = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec_ema.shape[1] > 3\n xrec_ema = self.to_rgb(xrec_ema)\n log[\"samples_ema\"] = self.decode(torch.randn_like(posterior_ema.sample()))\n log[\"reconstructions_ema\"] = xrec_ema\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n 
self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True,timesteps=1000):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n x_T=None,\n timesteps=1000,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose,timesteps=timesteps)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n timesteps=timesteps,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0])\n # subset_end = int(timesteps+1 * self.ddim_timesteps.shape[0] / self.ddpm_num_timesteps)\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % 500 == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise 
NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, 
device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec" }, { "identifier": "cal_anomaly_map", "path": "utils/util.py", "snippet": "def cal_anomaly_map(fs_list, ft_list, out_size=224, amap_mode='mul'):\n if amap_mode == 'mul':\n anomaly_map = np.ones([out_size, out_size])\n else:\n anomaly_map = np.zeros([out_size, out_size])\n a_map_list = []\n for i in range(len(ft_list)):\n fs = fs_list[i]\n ft = ft_list[i]\n #fs_norm = F.normalize(fs, p=2)\n #ft_norm = F.normalize(ft, p=2)\n a_map = 1 - F.cosine_similarity(fs, ft)\n a_map = torch.unsqueeze(a_map, dim=1)\n a_map = F.interpolate(a_map, size=out_size, mode='bilinear', align_corners=True)\n a_map = a_map[0, 0, :, :].to('cpu').detach().numpy()\n a_map_list.append(a_map)\n if amap_mode == 'mul':\n anomaly_map *= a_map\n else:\n anomaly_map += a_map\n return anomaly_map, a_map_list" }, { "identifier": "log_local", "path": "utils/util.py", "snippet": "def log_local(images, filenames):\n pixel_mean = [0.485, 0.456, 0.406]\n pixel_std = [0.229, 0.224, 0.225]\n pixel_mean = torch.tensor(pixel_mean).cuda().unsqueeze(1).unsqueeze(1) # 3 x 1 x 1\n pixel_std = torch.tensor(pixel_std).cuda().unsqueeze(1).unsqueeze(1)\n root = os.path.join('log_image/')\n name = filenames[-7:-4]\n for k in images:\n image = (images[k].squeeze() * pixel_std + pixel_mean) * 255\n image = image.permute(1, 2, 0).to('cpu').numpy()\n filename = \"{}-{}.jpg\".format(name, k)\n path = os.path.join(root, filenames[:-7],filename)\n os.makedirs(os.path.split(path)[0], exist_ok=True)\n # Image.fromarray(image).save(path)\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n cv2.imwrite(path, image)" }, { "identifier": "create_logger", "path": "utils/util.py", "snippet": "def create_logger(name, log_file, level=logging.INFO):\n log = logging.getLogger(name)\n formatter = logging.Formatter(\n \"[%(asctime)s][%(filename)15s][line:%(lineno)4d][%(levelname)8s] %(message)s\"\n )\n fh = logging.FileHandler(log_file)\n fh.setFormatter(formatter)\n sh = logging.StreamHandler()\n sh.setFormatter(formatter)\n log.setLevel(level)\n log.addHandler(fh)\n log.addHandler(sh)\n return log" }, { "identifier": "dump", "path": "utils/eval_helper.py", "snippet": "def dump(save_dir, outputs):\n filenames = outputs[\"filename\"]\n batch_size = len(filenames)\n preds = outputs[\"pred\"].cpu().numpy() # B x 1 x H x W\n masks = outputs[\"mask\"].cpu().numpy() # B x 1 x H x W\n # heights = outputs[\"height\"].cpu().numpy()\n # widths = outputs[\"width\"].cpu().numpy()\n clsnames = outputs[\"clsname\"]\n for i in range(batch_size):\n file_dir, filename = os.path.split(filenames[i])\n _, subname = os.path.split(file_dir)\n filename = \"{}_{}_{}\".format(clsnames[i], subname, filename)\n filename, _ = os.path.splitext(filename)\n save_file = os.path.join(save_dir, filename + \".npz\")\n np.savez(\n save_file,\n filename=filenames[i],\n pred=preds[i],\n mask=masks[i],\n # height=heights[i],\n # width=widths[i],\n clsname=clsnames[i],\n )" }, { "identifier": "log_metrics", "path": "utils/eval_helper.py", "snippet": "def log_metrics(ret_metrics, config):\n logger = logging.getLogger(\"global_logger\")\n clsnames = set([k.rsplit(\"_\", 2)[0] for k in ret_metrics.keys()])\n clsnames = list(clsnames - set([\"mean\"])) + [\"mean\"]\n\n # auc\n if config.get(\"auc\", None):\n 
auc_keys = [k for k in ret_metrics.keys() if \"auc\" in k]\n evalnames = list(set([k.rsplit(\"_\", 2)[1] for k in auc_keys]))\n record = Report([\"clsname\"] + evalnames)\n\n for clsname in clsnames:\n clsvalues = [\n ret_metrics[\"{}_{}_auc\".format(clsname, evalname)]\n for evalname in evalnames\n ]\n record.add_one_record([clsname] + clsvalues)\n\n logger.info(f\"\\n{record}\")" }, { "identifier": "merge_together", "path": "utils/eval_helper.py", "snippet": "def merge_together(save_dir):\n npz_file_list = glob.glob(os.path.join(save_dir, \"*.npz\"))\n fileinfos = []\n preds = []\n masks = []\n for npz_file in npz_file_list:\n npz = np.load(npz_file)\n fileinfos.append(\n {\n \"filename\": str(npz[\"filename\"]),\n # \"height\": npz[\"height\"],\n # \"width\": npz[\"width\"],\n \"clsname\": str(npz[\"clsname\"]),\n }\n )\n preds.append(npz[\"pred\"])\n masks.append(npz[\"mask\"])\n preds = np.concatenate(np.asarray(preds), axis=0) # N x H x W\n masks = np.concatenate(np.asarray(masks), axis=0) # N x H x W\n return fileinfos, preds, masks" }, { "identifier": "performances", "path": "utils/eval_helper.py", "snippet": "def performances(fileinfos, preds, masks, config):\n ret_metrics = {}\n clsnames = set([fileinfo[\"clsname\"] for fileinfo in fileinfos])\n for clsname in clsnames:\n preds_cls = []\n masks_cls = []\n file_cls = []\n for fileinfo, pred, mask in zip(fileinfos, preds, masks):\n if fileinfo[\"clsname\"] == clsname:\n preds_cls.append(pred[None, ...])\n masks_cls.append(mask[None, ...])\n file_cls.append(fileinfo['filename'])\n preds_cls = np.concatenate(np.asarray(preds_cls), axis=0) # N x H x W\n masks_cls = np.concatenate(np.asarray(masks_cls), axis=0) # N x H x W\n data_meta = EvalDataMeta(preds_cls, masks_cls, file_cls)\n\n # auc\n if config.get(\"auc\", None):\n for metric in config[\"auc\"]:\n evalname = metric[\"name\"]\n kwargs = metric.get(\"kwargs\", {})\n eval_method = eval_lookup_table[evalname](data_meta, **kwargs)\n auc = eval_method.eval_auc()\n ret_metrics[\"{}_{}_auc\".format(clsname, evalname)] = auc\n\n if config.get(\"auc\", None):\n for metric in config[\"auc\"]:\n evalname = metric[\"name\"]\n evalvalues = [\n ret_metrics[\"{}_{}_auc\".format(clsname, evalname)]\n for clsname in clsnames\n ]\n mean_auc = np.mean(np.array(evalvalues))\n ret_metrics[\"{}_{}_auc\".format(\"mean\", evalname)] = mean_auc\n\n return ret_metrics" } ]
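The `LitEma` snippet in the context list above keeps shadow copies of the trainable parameters and, after each optimizer step, moves them toward the live weights with a warm-up-scaled decay. A minimal sketch of that update rule on a toy tensor, assuming the same schedule `min(decay, (1 + n) / (10 + n))`; the tensor and the step loop here are illustrative and not part of the repository.

import torch

decay = 0.9999
num_updates = 0
param = torch.randn(4)   # stands in for one trainable model parameter
shadow = param.clone()   # the EMA ("shadow") copy kept by LitEma

for _ in range(3):
    # pretend an optimizer step moved the live parameter
    param = param + 0.01 * torch.randn(4)

    # warm-up: early in training the effective decay is much smaller than 0.9999
    num_updates += 1
    eff_decay = min(decay, (1 + num_updates) / (10 + num_updates))

    # shadow <- shadow - (1 - decay) * (shadow - param)
    shadow = shadow - (1.0 - eff_decay) * (shadow - param)

print(shadow)

`copy_to` then writes the shadow values back into the model for evaluation, and `store`/`restore` make that swap reversible, which is how the `ema_scope` context manager works.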
import torch import os import logging import timm import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager, nullcontext from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from omegaconf import ListConfig from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from ldm.models.diffusion.ddim import DDIMSampler from scipy.ndimage import gaussian_filter from utils.util import cal_anomaly_map, log_local, create_logger from utils.eval_helper import dump, log_metrics, merge_together, performances
14,100
if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim:
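Both the masked branch of `p_sample_loop` above (`img_orig = self.q_sample(x0, ts)`) and `stochastic_encode` in the DDIM snippet rely on the closed-form forward step of the diffusion, x_t = sqrt(alphas_cumprod_t) * x_0 + sqrt(1 - alphas_cumprod_t) * noise, built from the buffers registered in `register_schedule`. Below is a minimal sketch of that step with a linear beta schedule, mirroring the defaults in the DDPM constructor (linear_start=1e-4, linear_end=2e-2, 1000 steps); the toy tensor shapes are illustrative only.

import torch

# Linear schedule as in make_beta_schedule("linear", ...): linspace on sqrt(beta), then squared.
T = 1000
betas = torch.linspace(1e-4 ** 0.5, 2e-2 ** 0.5, T, dtype=torch.float64) ** 2
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
sqrt_ac = alphas_cumprod.sqrt().float()
sqrt_one_minus_ac = (1.0 - alphas_cumprod).sqrt().float()

def q_sample(x0, t, noise=None):
    # Closed-form forward noising x_t ~ q(x_t | x_0).
    if noise is None:
        noise = torch.randn_like(x0)
    shape = (x0.shape[0],) + (1,) * (x0.dim() - 1)   # broadcast schedule values over image dims
    return sqrt_ac[t].view(shape) * x0 + sqrt_one_minus_ac[t].view(shape) * noise

x0 = torch.randn(2, 3, 8, 8)                      # toy latent batch
t = torch.randint(0, T, (2,), dtype=torch.long)   # one timestep per sample
print(q_sample(x0, t).shape)                      # torch.Size([2, 3, 8, 8])

In the sampling loop, this is what lets the known region of `x0` be re-noised to the current timestep and blended with the partially denoised `img` through the mask.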
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) else: self.register_buffer('logvar', logvar) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_v(self, x, noise, t): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x ) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: 
x = x[..., None] # x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): input_img = batch['jpg'] input_features = self.pretrained_model(input_img) output = self.log_images_test(batch) log_local(output, batch["filename"][0]) output_img = output['samples'] output_features = self.pretrained_model(output_img) input_features = input_features[1:4] output_features = output_features[1:4] anomaly_map, _ = cal_anomaly_map(input_features, output_features, input_img.shape[-1], amap_mode='a') anomaly_map = gaussian_filter(anomaly_map, sigma=5) anomaly_map = torch.from_numpy(anomaly_map) anomaly_map_prediction = anomaly_map.unsqueeze(dim=0).unsqueeze(dim=1) batch['pred'] = anomaly_map_prediction batch["output"] = output_img.cpu() batch["input"] = input_img.cpu() dump(self.evl_dir, batch) @torch.no_grad() def on_validation_epoch_start(self): self.evl_dir = "npz_result" self.logger_val = create_logger("global_logger", "log/") pretrained_model = timm.create_model("resnet50", pretrained=True, features_only=True) self.pretrained_model = pretrained_model.to("cuda") self.pretrained_model.eval() os.makedirs(self.evl_dir, exist_ok=True) @torch.no_grad() def on_validation_epoch_end(self, *args, **kwargs): # evl_metrics = {'auc': [{'name': 'max'}, {'name': 'pixel'}, {'name': 'appx'}, {'name': 'apsp'}, # {'name': 'f1px'}, {'name': 'f1sp'}]} evl_metrics = {'auc': [{'name': 'max'}, {'name': 'pixel'}, {'name': 'pro'}, {'name': 'appx'}, {'name': 'apsp'}, {'name': 'f1px'}, {'name': 'f1sp'}]} # evl_metrics = {'auc': [{'name': 'max'}, {'name': 'pixel'}]} self.print("Gathering final results ...") fileinfos, preds, masks = merge_together(self.evl_dir) ret_metrics = performances(fileinfos, preds, masks, evl_metrics) log_metrics(ret_metrics, evl_metrics) auroc_px = ret_metrics['mean_pixel_auc'] auroc_sp = ret_metrics['mean_max_auc'] val_acc = auroc_px + auroc_sp self.log('val_acc', val_acc, on_epoch=True, prog_bar=True, logger=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % 
self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, force_null_conditioning=False, *args, **kwargs): self.force_null_conditioning = force_null_conditioning self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning: conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) reset_ema = kwargs.pop("reset_ema", False) reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True if reset_ema: assert self.use_ema print( f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet 
implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = 
torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, return_x=False): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() # z = x if self.model.conditioning_key is not None and not self.force_null_conditioning: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox', "txt"]: xc = batch[cond_key] elif cond_key in ['class_label', 'cls']: xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_x: out.extend([x]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. 
/ self.scale_factor * z return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is expected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = 'train' if self.training else 'val' if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) loss_dict.update({f'{prefix}/loss': loss}) return loss, loss_dict def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == 
"eps" model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1., 1.) if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) if return_x0: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, 
noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim:
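When a `mask` and `x0` are provided, the `p_sample_loop` above performs an inpainting-style update: at every step the known region of `x0` is re-noised to the current timestep via `q_sample` and blended with the freshly sampled latent. A minimal sketch of that blend, assuming mask values of 1 mark pixels to keep (shapes are illustrative):

import torch


def merge_known_region(img, noised_x0, mask):
    """Blend a known region into the current reverse-process sample.

    Matches the `img_orig * mask + (1. - mask) * img` update above: `mask`
    is 1 where pixels are fixed (taken from the re-noised ground truth)
    and 0 where they are being generated.
    """
    return noised_x0 * mask + (1.0 - mask) * img


# toy shapes: batch of 2 latents, 4 channels, 32x32
img = torch.randn(2, 4, 32, 32)        # current sample x_t
noised_x0 = torch.randn(2, 4, 32, 32)  # stand-in for q_sample(x0, t)
mask = torch.zeros(2, 4, 32, 32)
mask[..., :16] = 1.0                   # keep the left half of the image fixed
out = merge_known_region(img, noised_x0, mask)
assert out.shape == img.shape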
ddim_sampler = DDIMSampler(self)
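The recorded completion above swaps in a DDIM sampler for faster inference; the non-DDIM path instead iterates the ancestral update from `p_sample` in the code above, drawing the next latent as the posterior mean plus sigma-scaled noise, with the noise switched off at t == 0. A minimal sketch of that single update (tensor shapes and names are illustrative):

import torch


def ancestral_step(model_mean, model_log_variance, t):
    """One reverse-diffusion draw: mean + sigma * noise, no noise at t == 0.

    Mirrors `model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise`
    from the `p_sample` implementations above.
    """
    b = model_mean.shape[0]
    noise = torch.randn_like(model_mean)
    # broadcastable mask that zeroes the noise for batch elements at the final step
    nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (model_mean.dim() - 1)))
    return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise


# toy usage: batch of 3 latents; the last element is at t == 0, so it stays at the mean
mean = torch.zeros(3, 4, 8, 8)
log_var = torch.full_like(mean, -2.0)
t = torch.tensor([5, 1, 0])
x_prev = ancestral_step(mean, log_var, t)
assert torch.allclose(x_prev[2], mean[2])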
16
2023-10-30 14:21:09+00:00
16k
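The values just above close out one record: the ground-truth completion line, an integer index, a creation timestamp, and a size bucket, before the next record (nv-tlabs/trace, tbsim/algos/algos.py) begins. A hedged sketch of how a generated line could be scored against such a recorded completion with simple exact/prefix matching; this scoring scheme is an assumption for illustration, not the dataset's own tooling:

def score_completion(predicted: str, target: str) -> dict:
    """Compare a model-generated line against a recorded ground-truth line.

    Exact match after whitespace normalization, plus a cruder prefix match.
    Illustrative only; not taken from the dataset's evaluation code.
    """
    pred = predicted.strip()
    gold = target.strip()
    return {
        "exact_match": pred == gold,
        "prefix_match": len(pred) > 0 and gold.startswith(pred),
    }


# toy usage with the ground-truth completion recorded above
gold_line = "ddim_sampler = DDIMSampler(self)"
print(score_completion("ddim_sampler = DDIMSampler(self)", gold_line))   # exact and prefix match
print(score_completion("ddim_sampler = DDPMSampler(self)", gold_line))   # neither matches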
nv-tlabs/trace
tbsim/algos/algos.py
[ { "identifier": "batch_utils", "path": "tbsim/utils/batch_utils.py", "snippet": "def batch_utils():\n return trajdataBatchUtils()" }, { "identifier": "Action", "path": "tbsim/policies/common.py", "snippet": "class Action(Trajectory):\n pass" }, { "identifier": "DiffuserModel", "path": "tbsim/models/trace.py", "snippet": "class DiffuserModel(nn.Module):\n '''\n TRACE model.\n '''\n def __init__(\n self,\n map_encoder_model_arch: str,\n input_image_shape,\n map_feature_dim: int,\n map_grid_feature_dim: int,\n diffuser_model_arch: str,\n horizon: int,\n observation_dim: int, \n action_dim: int,\n output_dim: int,\n cond_feature_dim = 256,\n rasterized_map = True,\n use_map_feat_global = False,\n use_map_feat_grid = True,\n hist_num_frames = 31,\n hist_feature_dim = 128,\n n_timesteps=1000,\n loss_type='l2', \n action_weight=1.0, \n loss_discount=1.0, \n dim_mults=(1, 2, 4, 8),\n dynamics_type=None,\n dynamics_kwargs={},\n base_dim=32,\n diffuser_input_mode='state_and_action',\n use_conditioning=True,\n cond_fill_value=-1.0,\n # norm info is ([add_coeffs, div_coeffs])\n diffuser_norm_info=([-17.5, 0, 0, 0, 0, 0],[22.5, 10, 40, 3.14, 500, 31.4]),\n # if using non-rasterized histories, need these\n agent_hist_norm_info=([0.0, 0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0, 1.0]),\n neighbor_hist_norm_info=([0.0, 0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0, 1.0]),\n dt=0.1,\n ) -> None:\n\n super().__init__()\n\n # this applies to map and past NEIGHBOR conditioning only\n # curr state or past ego trajecotry are always given\n self.use_conditioning = use_conditioning\n # for test-time classifier-free guidance, if desired\n self.cond_fill_value = cond_fill_value \n\n self.rasterized_map = rasterized_map\n\n cond_in_feat_size = 0\n cond_out_feat_size = cond_feature_dim\n\n # history encoding\n self.agent_hist_encoder = self.neighbor_hist_encoder = None\n # ego history is ALWAYS used as conditioning\n self.agent_hist_encoder = AgentHistoryEncoder(hist_num_frames,\n out_dim=hist_feature_dim,\n use_norm=True,\n norm_info=agent_hist_norm_info)\n cond_in_feat_size += hist_feature_dim\n\n if self.use_conditioning:\n self.neighbor_hist_encoder = NeighborHistoryEncoder(hist_num_frames,\n out_dim=hist_feature_dim,\n use_norm=True,\n norm_info=neighbor_hist_norm_info)\n cond_in_feat_size += hist_feature_dim\n\n # map encoding\n self.map_encoder = None\n self.use_map_feat_global = use_map_feat_global\n self.use_map_feat_grid = use_map_feat_grid\n self.input_image_shape = input_image_shape\n if self.use_conditioning and self.rasterized_map:\n self.map_encoder = MapEncoder(\n model_arch=map_encoder_model_arch,\n input_image_shape=input_image_shape,\n global_feature_dim=map_feature_dim if self.use_map_feat_global else None,\n grid_feature_dim=map_grid_feature_dim if self.use_map_feat_grid else None,\n )\n\n if self.use_map_feat_global:\n cond_in_feat_size += map_feature_dim\n\n # MLP to combine conditioning from all sources\n combine_layer_dims = (cond_in_feat_size, cond_in_feat_size, cond_out_feat_size, cond_out_feat_size)\n self.process_cond_mlp = base_models.MLP(cond_in_feat_size,\n cond_out_feat_size,\n combine_layer_dims,\n normalization=True)\n\n self._dynamics_type = dynamics_type\n self._dynamics_kwargs = dynamics_kwargs\n self._create_dynamics()\n \n # ----- diffuser -----\n self.dt = dt\n # x, y, vel, yaw, acc, yawvel\n assert len(diffuser_norm_info) == 2\n norm_add_coeffs = diffuser_norm_info[0]\n norm_div_coeffs = diffuser_norm_info[1]\n assert len(norm_add_coeffs) == 6\n assert len(norm_div_coeffs) == 6\n 
self.add_coeffs = np.array(norm_add_coeffs).astype('float32')\n self.div_coeffs = np.array(norm_div_coeffs).astype('float32')\n \n self.diffuser_input_mode = diffuser_input_mode\n\n if diffuser_input_mode == 'state_and_action':\n self.default_chosen_inds = [0, 1, 2, 3, 4, 5]\n else:\n raise\n \n self.horizon = horizon\n \n self.observation_dim = observation_dim\n self.action_dim = action_dim\n self.transition_dim = observation_dim + action_dim\n self.output_dim = output_dim\n \n if diffuser_model_arch == \"TemporalMapUnet\":\n transition_in_dim = self.transition_dim\n if self.use_map_feat_grid and self.map_encoder is not None:\n # will be appending map features to each step of trajectory\n transition_in_dim += map_grid_feature_dim\n self.model = TemporalMapUnet(horizon=horizon,\n transition_dim=transition_in_dim,\n cond_dim=cond_out_feat_size,\n output_dim=self.output_dim,\n dim=base_dim,\n dim_mults=dim_mults,\n )\n else:\n print('unknown diffuser_model_arch:', diffuser_model_arch)\n raise\n\n betas = cosine_beta_schedule(n_timesteps)\n alphas = 1. - betas\n alphas_cumprod = torch.cumprod(alphas, axis=0)\n alphas_cumprod_prev = torch.cat([torch.ones(1), alphas_cumprod[:-1]])\n\n self.n_timesteps = int(n_timesteps)\n\n self.register_buffer('betas', betas)\n self.register_buffer('alphas_cumprod', alphas_cumprod)\n self.register_buffer('alphas_cumprod_prev', alphas_cumprod_prev)\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', torch.sqrt(1. - alphas_cumprod))\n self.register_buffer('log_one_minus_alphas_cumprod', torch.log(1. - alphas_cumprod))\n self.register_buffer('sqrt_recip_alphas_cumprod', torch.sqrt(1. / alphas_cumprod))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', torch.sqrt(1. / alphas_cumprod - 1))\n\n # calculations for posterior q(x_{t-1} | x_t, x_0)\n posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)\n self.register_buffer('posterior_variance', posterior_variance)\n\n # calculations for class-free guidance\n self.sqrt_alphas_over_one_minus_alphas_cumprod = torch.sqrt(alphas_cumprod / (1.0 - alphas_cumprod))\n self.sqrt_recip_one_minus_alphas_cumprod = 1.0 / torch.sqrt(1. - alphas_cumprod)\n\n ## log calculation clipped because the posterior variance\n ## is 0 at the beginning of the diffusion chain\n self.register_buffer('posterior_log_variance_clipped',\n torch.log(torch.clamp(posterior_variance, min=1e-20)))\n self.register_buffer('posterior_mean_coef1',\n betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))\n self.register_buffer('posterior_mean_coef2',\n (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. 
- alphas_cumprod))\n\n ## get loss coefficients and initialize objective\n loss_weights = self.get_loss_weights(action_weight, loss_discount)\n self.loss_fn = Losses[loss_type](loss_weights, self.action_dim)\n\n # for guided sampling\n self.current_guidance = None\n\n #------------------------------------------ guidance utils ------------------------------------------#\n\n def set_guidance(self, guidance_config_list, example_batch=None):\n '''\n Instantiates test-time guidance functions using the list of configs (dicts) passed in.\n '''\n if guidance_config_list is not None:\n if len(guidance_config_list) > 0 and verify_guidance_config_list(guidance_config_list):\n print('Instantiating test-time guidance with configs:')\n print(guidance_config_list)\n self.current_guidance = DiffuserGuidance(guidance_config_list, example_batch)\n\n def update_guidance(self, **kwargs):\n if self.current_guidance is not None:\n self.current_guidance.update(**kwargs)\n\n def clear_guidance(self):\n self.current_guidance = None\n\n #------------------------------------------ utility ------------------------------------------#\n def _create_dynamics(self):\n if self._dynamics_type in [\"Unicycle\", dynamics.DynType.UNICYCLE]:\n self.dyn = dynamics.Unicycle(\n \"dynamics\",\n max_steer=self._dynamics_kwargs[\"max_steer\"],\n max_yawvel=self._dynamics_kwargs[\"max_yawvel\"],\n acce_bound=self._dynamics_kwargs[\"acce_bound\"]\n )\n else:\n self.dyn = None\n\n def get_aux_info(self, data_batch, include_class_free_cond=False):\n N = data_batch[\"history_positions\"].size(0)\n device = data_batch[\"history_positions\"].device\n\n cond_feat_in = torch.empty((N,0)).to(device)\n non_cond_feat_in = torch.empty((N,0)).to(device)\n\n #\n # current ego state\n #\n # always need this for rolling out actions\n if self._dynamics_type is not None:\n curr_states = batch_utils().get_current_states(data_batch, dyn_type=self.dyn.type())\n else:\n curr_states = None\n\n #\n # rasterized map\n #\n map_grid_feat = map_grid_feat_non_cond = raster_from_agent = None\n if self.map_encoder is not None:\n image_batch = data_batch[\"image\"]\n map_global_feat, map_grid_feat = self.map_encoder(image_batch)\n if self.use_map_feat_global:\n cond_feat_in = torch.cat([cond_feat_in, map_global_feat], dim=-1)\n if self.use_map_feat_grid and self.map_encoder is not None:\n raster_from_agent = data_batch[\"raster_from_agent\"]\n\n if include_class_free_cond:\n image_non_cond = torch.ones_like(image_batch) * self.cond_fill_value\n map_global_feat_non_cond, map_grid_feat_non_cond = self.map_encoder(image_non_cond)\n if self.use_map_feat_global:\n non_cond_feat_in = torch.cat([non_cond_feat_in, map_global_feat_non_cond], dim=-1)\n\n #\n # ego history\n #\n if self.agent_hist_encoder is not None:\n agent_hist_feat = self.agent_hist_encoder(data_batch[\"history_positions\"],\n data_batch[\"history_yaws\"],\n data_batch[\"history_speeds\"],\n data_batch[\"extent\"],\n data_batch[\"history_availabilities\"])\n cond_feat_in = torch.cat([cond_feat_in, agent_hist_feat], dim=-1)\n if include_class_free_cond:\n # make all agents zero availability\n non_cond_avail = torch.zeros_like(data_batch[\"history_speeds\"]).to(torch.bool) # BxT\n agent_hist_feat_non_cond = self.agent_hist_encoder(data_batch[\"history_positions\"],\n data_batch[\"history_yaws\"],\n data_batch[\"history_speeds\"],\n data_batch[\"extent\"],\n non_cond_avail)\n non_cond_feat_in = torch.cat([non_cond_feat_in, agent_hist_feat_non_cond], dim=-1)\n\n #\n # neighbor history\n #\n\n # neighbor 
trajectory encoding\n if self.neighbor_hist_encoder is not None:\n neighbor_hist_feat = self.neighbor_hist_encoder(data_batch[\"all_other_agents_history_positions\"],\n data_batch[\"all_other_agents_history_yaws\"],\n data_batch[\"all_other_agents_history_speeds\"],\n data_batch[\"all_other_agents_extents\"],\n data_batch[\"all_other_agents_history_availabilities\"])\n cond_feat_in = torch.cat([cond_feat_in, neighbor_hist_feat], dim=-1) \n if include_class_free_cond:\n # make all agents zero availability\n non_cond_neighbor_avail = torch.zeros_like(data_batch[\"all_other_agents_history_speeds\"]).to(torch.bool) # BxNxT\n neighbor_hist_feat_non_cond = self.neighbor_hist_encoder(data_batch[\"all_other_agents_history_positions\"],\n data_batch[\"all_other_agents_history_yaws\"],\n data_batch[\"all_other_agents_history_speeds\"],\n data_batch[\"all_other_agents_extents\"],\n non_cond_neighbor_avail)\n non_cond_feat_in = torch.cat([non_cond_feat_in, neighbor_hist_feat_non_cond], dim=-1)\n\n #\n # Process all features together\n #\n cond_feat = self.process_cond_mlp(cond_feat_in)\n non_cond_feat = None\n if include_class_free_cond:\n non_cond_feat = self.process_cond_mlp(non_cond_feat_in)\n\n aux_info = {\n 'cond_feat': cond_feat, \n 'curr_states': curr_states,\n }\n if include_class_free_cond:\n aux_info['non_cond_feat'] = non_cond_feat\n if self.use_map_feat_grid and self.map_encoder is not None:\n aux_info['map_grid_feat'] = map_grid_feat\n if include_class_free_cond:\n aux_info['map_grid_feat_non_cond'] = map_grid_feat_non_cond\n aux_info['raster_from_agent'] = raster_from_agent\n\n return aux_info\n\n def query_map_feats(self, x, map_grid_feat, raster_from_agent):\n '''\n - x : (B, T, D)\n - map_grid_feat : (B, C, H, W)\n - raster_from_agent: (B, 3, 3)\n '''\n B, T, _ = x.size()\n _, C, Hfeat, Wfeat = map_grid_feat.size()\n\n # unscale to agent coords\n pos_traj = self.descale_traj(x.detach())[:,:,:2]\n # convert to raster frame\n raster_pos_traj = transform_points_tensor(pos_traj, raster_from_agent)\n\n # scale to the feature map size\n _, H, W = self.input_image_shape\n xscale = Wfeat / W\n yscale = Hfeat / H\n raster_pos_traj[:,:,0] = raster_pos_traj[:,:,0] * xscale\n raster_pos_traj[:,:,1] = raster_pos_traj[:,:,1] * yscale\n\n # interpolate into feature grid\n feats_out = query_feature_grid(\n raster_pos_traj,\n map_grid_feat\n )\n feats_out = feats_out.reshape((B, T, -1))\n return feats_out\n\n def get_state_and_action_from_data_batch(self, data_batch, chosen_inds=[]):\n '''\n Extract state and(or) action from the data_batch from data_batch\n\n Input:\n data_batch: dict\n Output:\n x: (batch_size, num_steps, len(chosen_inds)).\n '''\n if len(chosen_inds) == 0:\n chosen_inds = self.default_chosen_inds\n\n # NOTE: for predicted agent, history and future with always be fully available\n traj_state = torch.cat(\n (data_batch[\"target_positions\"], data_batch[\"target_yaws\"]), dim=2)\n\n traj_state_and_action = convert_state_to_state_and_action(traj_state, data_batch[\"curr_speed\"], self.dt)\n\n return traj_state_and_action[..., chosen_inds]\n \n def convert_action_to_state_and_action(self, x_out, aux_info, scaled_input=True, descaled_output=False):\n '''\n Apply dynamics on input action trajectory to get state+action trajectory\n Input:\n x_out: (batch_size, num_steps, 2). scaled action trajectory\n Output:\n x_out: (batch_size, num_steps, 6). 
scaled state+action trajectory\n '''\n if scaled_input:\n x_out = self.descale_traj(x_out, [4, 5])\n \n x_out_state = unicyle_forward_dynamics(\n dyn_model=self.dyn,\n initial_states=aux_info['curr_states'],\n actions=x_out,\n step_time=self.dt,\n mode='parallel'\n )\n\n x_out_all = torch.cat([x_out_state, x_out], dim=-1)\n if scaled_input and not descaled_output:\n x_out_all = self.scale_traj(x_out_all, [0, 1, 2, 3, 4, 5])\n\n return x_out_all\n\n def scale_traj(self, target_traj_orig, chosen_inds=[]):\n '''\n - traj: B x T x D\n '''\n if len(chosen_inds) == 0:\n chosen_inds = self.default_chosen_inds\n add_coeffs = self.add_coeffs[chosen_inds][None,None] \n div_coeffs = self.div_coeffs[chosen_inds][None,None] \n\n device = target_traj_orig.get_device()\n dx_add = torch.tensor(add_coeffs, device=device)\n dx_div = torch.tensor(div_coeffs, device=device)\n\n target_traj = (target_traj_orig + dx_add) / dx_div\n\n return target_traj\n\n def descale_traj(self, target_traj_orig, chosen_inds=[]):\n '''\n - traj: B x T x D\n '''\n if len(chosen_inds) == 0:\n chosen_inds = self.default_chosen_inds\n add_coeffs = self.add_coeffs[chosen_inds][None,None] \n div_coeffs = self.div_coeffs[chosen_inds][None,None] \n\n device = target_traj_orig.get_device()\n dx_add = torch.tensor(add_coeffs, device=device)\n dx_div = torch.tensor(div_coeffs, device=device) \n\n target_traj = target_traj_orig * dx_div - dx_add\n\n return target_traj\n\n \n def forward(self, data_batch: Dict[str, torch.Tensor], num_samp=1,\n return_diffusion=False,\n return_guidance_losses=False,\n class_free_guide_w=0.0,\n apply_guidance=True,\n guide_clean=False) -> Dict[str, torch.Tensor]:\n use_class_free_guide = class_free_guide_w != 0.0\n aux_info = self.get_aux_info(data_batch, use_class_free_guide)\n \n cond_samp_out = self.conditional_sample(data_batch, \n horizon=None,\n aux_info=aux_info,\n return_diffusion=return_diffusion,\n return_guidance_losses=return_guidance_losses,\n num_samp=num_samp,\n class_free_guide_w=class_free_guide_w,\n apply_guidance=apply_guidance,\n guide_clean=guide_clean)\n traj_init = cond_samp_out['pred_traj']\n diff_init = guide_losses = None\n if return_diffusion:\n diff_init = cond_samp_out['diffusion']\n if return_guidance_losses:\n guide_losses = cond_samp_out['guide_losses']\n\n traj = self.descale_traj(traj_init)\n if diff_init is not None:\n diff_steps = self.descale_traj(diff_init)\n else:\n diff_steps = None\n\n if self.diffuser_input_mode in ['state_and_action']:\n traj = traj[..., [0, 1, 3]]\n else:\n raise\n\n pred_positions = traj[..., :2]\n pred_yaws = traj[..., 2:3]\n\n out_dict = {\n \"trajectories\": traj,\n \"predictions\": {\"positions\": pred_positions, \"yaws\": pred_yaws},\n }\n if diff_steps is not None:\n out_dict[\"predictions\"][\"diffusion_steps\"] = diff_steps\n if guide_losses is not None:\n out_dict[\"predictions\"][\"guide_losses\"] = guide_losses\n if self.dyn is not None:\n out_dict[\"curr_states\"] = aux_info['curr_states']\n\n return out_dict\n\n def compute_losses(self, data_batch):\n aux_info = self.get_aux_info(data_batch)\n target_traj = self.get_state_and_action_from_data_batch(data_batch) \n\n x = self.scale_traj(target_traj)\n \n diffusion_loss, _ = self.loss(x, aux_info=aux_info)\n losses = OrderedDict(\n diffusion_loss=diffusion_loss,\n )\n return losses\n\n def get_loss_weights(self, action_weight, discount):\n '''\n sets loss coefficients for trajectory\n\n action_weight : float\n coefficient on first action loss\n discount : float\n multiplies t^th timestep 
of trajectory loss by discount**t\n '''\n self.action_weight = action_weight\n\n dim_weights = torch.ones(self.transition_dim, dtype=torch.float32)\n\n ## decay loss with trajectory timestep: discount**t\n discounts = discount ** torch.arange(self.horizon, dtype=torch.float)\n discounts = discounts / discounts.mean()\n loss_weights = torch.einsum('h,t->ht', discounts, dim_weights)\n ## manually set a0 weight\n loss_weights[0, -self.action_dim:] = action_weight\n\n return loss_weights\n\n #------------------------------------------ sampling ------------------------------------------#\n def predict_start_from_noise(self, x_t, t, noise, force_noise=False):\n if force_noise:\n return (\n extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -\n extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise\n )\n else:\n return noise\n\n def predict_noise_from_start(self, x_t, t, x_start):\n return (\n extract(self.sqrt_recip_one_minus_alphas_cumprod.to(x_t.device), t, x_t.shape) * x_t -\n extract(self.sqrt_alphas_over_one_minus_alphas_cumprod.to(x_t.device), t, x_t.shape) * x_start\n )\n\n def q_posterior(self, x_start, x_t, t):\n posterior_mean = (\n extract(self.posterior_mean_coef1, t, x_t.shape) * x_start +\n extract(self.posterior_mean_coef2, t, x_t.shape) * x_t\n )\n posterior_variance = extract(self.posterior_variance, t, x_t.shape)\n posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape)\n return posterior_mean, posterior_variance, posterior_log_variance_clipped\n\n def p_mean_variance(self, x, t, aux_info={}, class_free_guide_w=0.0):\n t_inp = t\n\n x_model_in = x\n if self.use_map_feat_grid and self.map_encoder is not None:\n # get features from map and append to the trajectory\n map_feat_traj = self.query_map_feats(x_model_in.detach(),\n aux_info['map_grid_feat'],\n aux_info['raster_from_agent'])\n x_model_in = torch.cat([x_model_in, map_feat_traj], dim=-1)\n\n model_prediction = self.model(x_model_in, aux_info['cond_feat'], t_inp)\n\n if self.diffuser_input_mode == 'state_and_action':\n x_tmp = x[..., 4:].detach()\n else:\n raise\n\n if class_free_guide_w != 0.0:\n # now run non-cond once\n x_model_non_cond_in = x\n if self.use_map_feat_grid and self.map_encoder is not None:\n # get features from map and append to the trajectory\n map_feat_traj = self.query_map_feats(x_model_non_cond_in.detach(),\n aux_info['map_grid_feat_non_cond'],\n aux_info['raster_from_agent'])\n x_model_non_cond_in = torch.cat([x_model_non_cond_in, map_feat_traj], dim=-1)\n model_non_cond_prediction = self.model(x_model_non_cond_in, aux_info['non_cond_feat'], t_inp)\n\n # and combine to get actual model prediction (in noise space as in original paper)\n model_pred_noise = self.predict_noise_from_start(x_tmp, t=t, x_start=model_prediction)\n model_non_cond_pred_noise = self.predict_noise_from_start(x_tmp, t=t, x_start=model_non_cond_prediction)\n\n class_free_guide_noise = (1 + class_free_guide_w)*model_pred_noise - class_free_guide_w*model_non_cond_pred_noise\n\n model_prediction = self.predict_start_from_noise(x_tmp, t=t, noise=class_free_guide_noise, force_noise=True)\n\n x_recon = self.predict_start_from_noise(x_tmp, t=t, noise=model_prediction)\n \n model_mean, posterior_variance, posterior_log_variance = self.q_posterior(\n x_start=x_recon, x_t=x_tmp, t=t)\n return model_mean, posterior_variance, posterior_log_variance, (x_recon, x_tmp, t)\n\n def guidance(self, x, data_batch, aux_info, num_samp=1,\n return_grad_of=None):\n '''\n estimate the gradient of rule 
reward w.r.t. the input trajectory\n Input:\n x: [batch_size*num_samp, time_steps, feature_dim]. scaled input trajectory.\n data_batch: additional info.\n aux_info: additional info.\n return_grad_of: which variable to take gradient of guidance loss wrt, if not given,\n takes wrt the input x.\n '''\n assert self.current_guidance is not None, 'Must instantiate guidance object before calling'\n bsize = int(x.size(0) / num_samp)\n num_t = x.size(1)\n with torch.enable_grad():\n # losses are applied on unscaled trajectories containing both states and actions\n if self.diffuser_input_mode in ['state_and_action']:\n # forward dynamics to get actions\n x_all = self.convert_action_to_state_and_action(x, aux_info, scaled_input=True, descaled_output=True)\n else:\n raise\n\n # compute losses and gradient\n x_loss = x_all.reshape((bsize, num_samp, num_t, 6))\n tot_loss, per_losses = self.current_guidance.compute_guidance_loss(x_loss, data_batch)\n # print(tot_loss)\n tot_loss.backward()\n guide_grad = x.grad if return_grad_of is None else return_grad_of.grad\n\n return guide_grad, per_losses\n\n @torch.no_grad()\n def p_sample(self, x, t, data_batch, aux_info={}, num_samp=1, class_free_guide_w=0.0, \n apply_guidance=True, guide_clean=False, eval_final_guide_loss=False):\n b, *_, device = *x.shape, x.device\n with_func = torch.no_grad\n if self.current_guidance is not None and apply_guidance and guide_clean:\n # will need to take grad wrt noisy input\n x = x.detach()\n x.requires_grad_()\n with_func = torch.enable_grad\n\n with with_func():\n # get prior mean and variance for next step\n model_mean, _, model_log_variance, q_posterior_in = self.p_mean_variance(x=x, t=t, aux_info=aux_info,\n class_free_guide_w=class_free_guide_w)\n\n # no noise or guidance when t == 0\n # i.e. use the mean of the distribution predicted at the final step rather than sampling.\n nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))\n\n noise = torch.randn_like(model_mean)\n sigma = (0.5 * model_log_variance).exp()\n\n # compute guidance\n guide_losses = None\n guide_grad = torch.zeros_like(model_mean)\n if self.current_guidance is not None and apply_guidance:\n if guide_clean:\n # want to guide the predicted clean traj from model, not the noisy one\n model_clean_pred = q_posterior_in[0]\n x_guidance = model_clean_pred\n return_grad_of = x\n else:\n x_guidance = model_mean.clone().detach()\n return_grad_of = x_guidance\n x_guidance.requires_grad_()\n\n guide_grad, guide_losses = self.guidance(x_guidance, data_batch, aux_info, num_samp=num_samp, return_grad_of=return_grad_of)\n\n if guide_clean and self.diffuser_input_mode == 'state_and_action':\n # only need the grad w.r.t noisy action\n guide_grad = guide_grad[..., [4,5]]\n\n # NOTE: empirally, scaling by the variance (sigma) seems to degrade results\n guide_grad = nonzero_mask * guide_grad #* sigma\n\n noise = nonzero_mask * sigma * noise\n\n if self.current_guidance is not None and guide_clean:\n # perturb clean trajectory\n guided_clean = q_posterior_in[0] - guide_grad\n # use the same noisy input again\n guided_x_t = q_posterior_in[1]\n # re-compute next step distribution with guided clean & noisy trajectories\n model_mean, _, _ = self.q_posterior(x_start=guided_clean,\n x_t=guided_x_t,\n t=q_posterior_in[2])\n # NOTE: variance is not dependent on x_start, so it won't change. 
Therefore, fine to use same noise.\n x_out = model_mean + noise\n else:\n x_out = model_mean - guide_grad + noise\n\n if self.current_guidance is not None and eval_final_guide_loss:\n # eval guidance loss one last time for filtering if desired\n # (even if not applied during sampling)\n _, guide_losses = self.guidance(x_out.clone().detach().requires_grad_(), data_batch, aux_info, num_samp=num_samp)\n \n # convert action to state+action\n if self.diffuser_input_mode == 'state_and_action':\n x_out = self.convert_action_to_state_and_action(x_out, aux_info)\n \n return x_out, guide_losses\n\n \n @torch.no_grad()\n def p_sample_loop(self, shape, data_batch, num_samp,\n aux_info={},\n return_diffusion=False,\n return_guidance_losses=False,\n class_free_guide_w=0.0,\n apply_guidance=True,\n guide_clean=False):\n device = self.betas.device\n\n batch_size = shape[0]\n # sample from base distribution\n x = torch.randn(shape, device=device) # (B, N, T, D)\n\n x = TensorUtils.join_dimensions(x, begin_axis=0, end_axis=2) # B*N, T, D\n aux_info = TensorUtils.repeat_by_expand_at(aux_info, repeats=num_samp, dim=0)\n\n if self.current_guidance is not None and not apply_guidance:\n print('DIFFUSER: Note, not using guidance during sampling, only evaluating guidance loss at very end...')\n\n # convert action to state+action\n if self.diffuser_input_mode == 'state_and_action':\n x = self.convert_action_to_state_and_action(x[..., [4, 5]], aux_info)\n\n if return_diffusion: diffusion = [x]\n\n stride = 1 # NOTE: different from training time if > 1\n steps = [i for i in reversed(range(0, self.n_timesteps, stride))]\n for i in steps:\n timesteps = torch.full((batch_size*num_samp,), i, device=device, dtype=torch.long)\n \n x, guide_losses = self.p_sample(x, timesteps, data_batch,\n aux_info=aux_info,\n num_samp=num_samp,\n class_free_guide_w=class_free_guide_w,\n apply_guidance=apply_guidance,\n guide_clean=guide_clean,\n eval_final_guide_loss=(i == steps[-1]))\n \n\n if return_diffusion: diffusion.append(x)\n\n if guide_losses is not None:\n print('===== GUIDANCE LOSSES ======')\n for k,v in guide_losses.items():\n print('%s: %.012f' % (k, np.nanmean(v.cpu())))\n\n x = TensorUtils.reshape_dimensions(x, begin_axis=0, end_axis=1, target_dims=(batch_size, num_samp))\n\n out_dict = {'pred_traj' : x}\n if return_guidance_losses:\n out_dict['guide_losses'] = guide_losses\n if return_diffusion:\n diffusion = [TensorUtils.reshape_dimensions(cur_diff, begin_axis=0, end_axis=1, target_dims=(batch_size, num_samp))\n for cur_diff in diffusion]\n out_dict['diffusion'] = torch.stack(diffusion, dim=3)\n\n return out_dict\n\n @torch.no_grad()\n def conditional_sample(self, data_batch, horizon=None, num_samp=1, class_free_guide_w=0.0, **kwargs):\n batch_size = data_batch['history_positions'].size()[0]\n horizon = horizon or self.horizon\n shape = (batch_size, num_samp, horizon, self.transition_dim)\n\n return self.p_sample_loop(shape, data_batch, num_samp, class_free_guide_w=class_free_guide_w, **kwargs)\n\n #------------------------------------------ training ------------------------------------------#\n\n def q_sample(self, x_start, t, noise): \n sample = (\n extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +\n extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise\n )\n return sample\n\n def p_losses(self, x_start_init, t, aux_info={}):\n noise_init = torch.randn_like(x_start_init)\n\n x_start = x_start_init\n noise = noise_init\n x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)\n t_inp = 
t\n\n if self.diffuser_input_mode == 'state_and_action':\n x_action_noisy = x_noisy[..., [4, 5]]\n x_noisy = self.convert_action_to_state_and_action(x_action_noisy, aux_info)\n\n if self.use_map_feat_grid and self.map_encoder is not None:\n # get features from map and append to the trajectory\n map_feat_traj = self.query_map_feats(x_noisy.detach(),\n aux_info['map_grid_feat'],\n aux_info['raster_from_agent'])\n x_noisy = torch.cat([x_noisy, map_feat_traj], dim=-1)\n\n noise = self.model(x_noisy, aux_info['cond_feat'], t_inp)\n\n if self.diffuser_input_mode == 'state_and_action':\n x_recon_action = self.predict_start_from_noise(x_action_noisy, t=t, noise=noise)\n x_recon = self.convert_action_to_state_and_action(x_recon_action, aux_info)\n else:\n x_recon = self.predict_start_from_noise(x_noisy, t=t, noise=noise)\n\n # Note: we convert noise into x_start for loss estimation since we need to apply forward dynamics\n loss, info = self.loss_fn(x_recon, x_start)\n\n return loss, info\n\n def loss(self, x, aux_info={}):\n batch_size = len(x)\n t = torch.randint(0, self.n_timesteps, (batch_size,), device=x.device).long()\n \n return self.p_losses(x, t, aux_info=aux_info)" }, { "identifier": "EMA", "path": "tbsim/models/trace_helpers.py", "snippet": "class EMA():\n '''\n empirical moving average\n '''\n def __init__(self, beta):\n super().__init__()\n self.beta = beta\n\n def update_model_average(self, ma_model, current_model):\n with torch.no_grad():\n ema_state_dict = ma_model.state_dict()\n for key, value in current_model.state_dict().items():\n ema_value = ema_state_dict[key]\n ema_value.copy_(self.beta * ema_value + (1. - self.beta) * value)" }, { "identifier": "choose_action_from_guidance", "path": "tbsim/utils/guidance_loss.py", "snippet": "def choose_action_from_guidance(preds, obs_dict, guide_configs, guide_losses):\n B, N, T, _ = preds[\"positions\"].size()\n # arbitrarily use the first sample as the action if no guidance given\n act_idx = torch.zeros((B), dtype=torch.long, device=preds[\"positions\"].device)\n # choose sample closest to desired guidance\n accum_guide_loss = torch.stack([v for k,v in guide_losses.items()], dim=2)\n # each scene separately since may contain different guidance\n scount = 0\n for sidx in range(len(guide_configs)):\n scene_guide_cfg = guide_configs[sidx]\n ends = scount + len(scene_guide_cfg)\n scene_guide_loss = accum_guide_loss[..., scount:ends]\n scount = ends\n scene_mask = ~torch.isnan(torch.sum(scene_guide_loss, dim=[1,2]))\n scene_guide_loss = scene_guide_loss[scene_mask].cpu()\n scene_guide_loss = torch.nansum(scene_guide_loss, dim=-1)\n is_scene_level = np.array([guide_cfg.name in ['agent_collision', 'social_group'] for guide_cfg in scene_guide_cfg])\n if np.sum(is_scene_level) > 0: \n # choose which sample minimizes at the scene level (where each sample is a \"scene\")\n scene_act_idx = torch.argmin(torch.sum(scene_guide_loss, dim=0))\n else:\n # each agent can choose the sample that minimizes guidance loss independently\n scene_act_idx = torch.argmin(scene_guide_loss, dim=-1)\n\n act_idx[scene_mask] = scene_act_idx.to(act_idx.device)\n\n return act_idx" }, { "identifier": "choose_action_from_gt", "path": "tbsim/utils/guidance_loss.py", "snippet": "def choose_action_from_gt(preds, obs_dict):\n B, N, T, _ = preds[\"positions\"].size()\n # arbitrarily use the first sample as the action if no gt given\n act_idx = torch.zeros((B), dtype=torch.long, device=preds[\"positions\"].device)\n if \"target_positions\" in obs_dict:\n print(\"DIFFUSER: WARNING 
using sample closest to GT from diffusion model!\")\n # use the sample closest to GT\n # pred and gt may not be the same if gt is missing data at the end\n endT = min(T, obs_dict[\"target_positions\"].size(1))\n pred_pos = preds[\"positions\"][:,:,:endT]\n gt_pos = obs_dict[\"target_positions\"][:,:endT].unsqueeze(1)\n gt_valid = obs_dict[\"target_availabilities\"][:,:endT].unsqueeze(1).expand((B, N, endT))\n err = torch.norm(pred_pos - gt_pos, dim=-1)\n err[~gt_valid] = torch.nan # so doesn't affect\n ade = torch.nanmean(err, dim=-1) # B x N\n res_valid = torch.sum(torch.isnan(ade), dim=-1) == 0\n if torch.sum(res_valid) > 0:\n min_ade_idx = torch.argmin(ade, dim=-1)\n act_idx[res_valid] = min_ade_idx[res_valid]\n else:\n print('Could not choose sample based on GT, as no GT in data')\n\n return act_idx" } ]
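The DiffuserModel context above folds classifier-free guidance into p_mean_variance by converting the conditional and unconditional predictions to noise space and blending them with the guidance weight. A minimal standalone sketch of that blend, assuming toy tensors in place of the two model passes (the B x T x D shapes and w=0.5 below are illustrative only, not values from the repo):

import torch

def classifier_free_guidance(eps_cond, eps_uncond, w):
    # weighted combination of conditional and unconditional noise predictions;
    # w = 0 reduces to the purely conditional prediction
    return (1.0 + w) * eps_cond - w * eps_uncond

# toy B x T x D noise predictions standing in for the two forward passes
eps_cond = torch.randn(2, 52, 2)
eps_uncond = torch.randn(2, 52, 2)
eps_guided = classifier_free_guidance(eps_cond, eps_uncond, w=0.5)
print(eps_guided.shape)  # torch.Size([2, 52, 2])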
import numpy as np import copy import torch import torch.nn as nn import torch.optim as optim import pytorch_lightning as pl import torch.nn.functional as F import tbsim.utils.tensor_utils as TensorUtils import tbsim.utils.metrics as Metrics from tbsim.utils.batch_utils import batch_utils from tbsim.policies.common import Action from tbsim.models.trace import DiffuserModel from tbsim.models.trace_helpers import EMA from tbsim.utils.guidance_loss import choose_action_from_guidance, choose_action_from_gt
11211
use_map_feat_global=algo_config.use_map_feat_global, use_map_feat_grid=algo_config.use_map_feat_grid, map_encoder_model_arch=algo_config.map_encoder_model_arch, input_image_shape=modality_shapes["image"], # [C, H, W] map_feature_dim=algo_config.map_feature_dim, map_grid_feature_dim=algo_config.map_grid_feature_dim, hist_num_frames=algo_config.history_num_frames+1, # the current step is concat to the history hist_feature_dim=algo_config.history_feature_dim, cond_feature_dim=algo_config.cond_feat_dim, diffuser_model_arch=algo_config.diffuser_model_arch, horizon=algo_config.horizon, observation_dim=observation_dim, action_dim=action_dim, output_dim=output_dim, n_timesteps=algo_config.n_diffusion_steps, loss_type=algo_config.loss_type, action_weight=algo_config.action_weight, loss_discount=algo_config.loss_discount, dim_mults=algo_config.dim_mults, dynamics_type=algo_config.dynamics.type, dynamics_kwargs=algo_config.dynamics, base_dim=algo_config.base_dim, diffuser_input_mode=algo_config.diffuser_input_mode, use_conditioning=self.use_cond, cond_fill_value=self.cond_fill_val, diffuser_norm_info=algo_config.diffuser_norm_info, agent_hist_norm_info=algo_config.agent_hist_norm_info, neighbor_hist_norm_info=algo_config.neighbor_hist_norm_info, dt=algo_config.step_time, ) # set up initial guidance if guidance_config is not None: self.set_guidance(guidance_config) # set up EMA self.use_ema = algo_config.use_ema if self.use_ema: print('DIFFUSER: using EMA... val and get_action will use ema model') self.ema = EMA(algo_config.ema_decay) self.ema_policy = copy.deepcopy(self.nets["policy"]) self.ema_policy.requires_grad_(False) self.ema_update_every = algo_config.ema_step self.ema_start_step = algo_config.ema_start_step self.reset_parameters() self.cur_train_step = 0 @property def checkpoint_monitor_keys(self): if self.use_ema: return {"valLoss": "val/ema_losses_diffusion_loss"} else: return {"valLoss": "val/losses_diffusion_loss"} def forward(self, obs_dict, num_samp=1, class_free_guide_w=0.0, guide_as_filter_only=False, guide_clean=False): cur_policy = self.nets["policy"] # this function is only called at validation time, so use ema if self.use_ema: cur_policy = self.ema_policy return cur_policy(obs_dict, num_samp, return_diffusion=True, return_guidance_losses=True, class_free_guide_w=class_free_guide_w, apply_guidance=(not guide_as_filter_only), guide_clean=guide_clean)["predictions"] def _compute_metrics(self, pred_batch, data_batch): metrics = {} predictions = pred_batch["predictions"] preds = TensorUtils.to_numpy(predictions["positions"]) gt = TensorUtils.to_numpy(data_batch["target_positions"]) avail = TensorUtils.to_numpy(data_batch["target_availabilities"]) # compute ADE & FDE based on trajectory samples sample_preds = preds conf = np.ones(sample_preds.shape[0:2]) / float(sample_preds.shape[1]) metrics["ego_avg_ADE"] = Metrics.batch_average_displacement_error(gt, sample_preds, conf, avail, "mean").mean() metrics["ego_min_ADE"] = Metrics.batch_average_displacement_error(gt, sample_preds, conf, avail, "oracle").mean() metrics["ego_avg_FDE"] = Metrics.batch_final_displacement_error(gt, sample_preds, conf, avail, "mean").mean() metrics["ego_min_FDE"] = Metrics.batch_final_displacement_error(gt, sample_preds, conf, avail, "oracle").mean() # compute diversity scores based on trajectory samples metrics["ego_avg_APD"] = Metrics.batch_average_diversity(gt, sample_preds, conf, avail, "mean").mean() metrics["ego_max_APD"] = Metrics.batch_average_diversity(gt, sample_preds, conf, avail, "max").mean() 
metrics["ego_avg_FPD"] = Metrics.batch_final_diversity(gt, sample_preds, conf, avail, "mean").mean() metrics["ego_max_FPD"] = Metrics.batch_final_diversity(gt, sample_preds, conf, avail, "max").mean() return metrics def reset_parameters(self): self.ema_policy.load_state_dict(self.nets["policy"].state_dict()) def step_ema(self, step): if step < self.ema_start_step: self.reset_parameters() return self.ema.update_model_average(self.ema_policy, self.nets["policy"]) def training_step_end(self, batch_parts): self.cur_train_step += 1 def training_step(self, batch, batch_idx): """ Training on a single batch of data. Args: batch (dict): dictionary with torch.Tensors sampled from a data loader and filtered by @process_batch_for_training batch_idx (int): training step number (relative to the CURRENT epoch) - required by some Algos that need to perform staged training and early stopping Returns: info (dict): dictionary of relevant inputs, outputs, and losses that might be relevant for logging """ if self.use_ema and self.cur_train_step % self.ema_update_every == 0: self.step_ema(self.cur_train_step)
class DiffuserTrafficModel(pl.LightningModule): def __init__(self, algo_config, modality_shapes, guidance_config=None): """ Creates networks and places them into @self.nets. """ super(DiffuserTrafficModel, self).__init__() self.algo_config = algo_config self.nets = nn.ModuleDict() if algo_config.diffuser_input_mode == 'state_and_action': # "Observations" are inputs to diffuser that are not outputs observation_dim = 4 # x, y, vel, yaw # "Actions" are inputs and outputs action_dim = 2 # acc, yawvel # "output" is final output of the entired denoising process output_dim = 2 # acc, yawvel else: raise self.cond_drop_map_p = algo_config.conditioning_drop_map_p self.cond_drop_neighbor_p = algo_config.conditioning_drop_neighbor_p min_cond_drop_p = min([self.cond_drop_map_p, self.cond_drop_neighbor_p]) max_cond_drop_p = max([self.cond_drop_map_p, self.cond_drop_neighbor_p]) assert min_cond_drop_p >= 0.0 and max_cond_drop_p <= 1.0 self.use_cond = self.cond_drop_map_p < 1.0 and self.cond_drop_neighbor_p < 1.0 # no need for conditioning arch if always dropping self.cond_fill_val = algo_config.conditioning_drop_fill self.use_rasterized_map = algo_config.rasterized_map if self.use_cond: if self.cond_drop_map_p > 0: print('DIFFUSER: Dropping map input conditioning with p = %f during training...' % (self.cond_drop_map_p)) if self.cond_drop_neighbor_p > 0: print('DIFFUSER: Dropping neighbor traj input conditioning with p = %f during training...' % (self.cond_drop_neighbor_p)) self.nets["policy"] = DiffuserModel( rasterized_map=algo_config.rasterized_map, use_map_feat_global=algo_config.use_map_feat_global, use_map_feat_grid=algo_config.use_map_feat_grid, map_encoder_model_arch=algo_config.map_encoder_model_arch, input_image_shape=modality_shapes["image"], # [C, H, W] map_feature_dim=algo_config.map_feature_dim, map_grid_feature_dim=algo_config.map_grid_feature_dim, hist_num_frames=algo_config.history_num_frames+1, # the current step is concat to the history hist_feature_dim=algo_config.history_feature_dim, cond_feature_dim=algo_config.cond_feat_dim, diffuser_model_arch=algo_config.diffuser_model_arch, horizon=algo_config.horizon, observation_dim=observation_dim, action_dim=action_dim, output_dim=output_dim, n_timesteps=algo_config.n_diffusion_steps, loss_type=algo_config.loss_type, action_weight=algo_config.action_weight, loss_discount=algo_config.loss_discount, dim_mults=algo_config.dim_mults, dynamics_type=algo_config.dynamics.type, dynamics_kwargs=algo_config.dynamics, base_dim=algo_config.base_dim, diffuser_input_mode=algo_config.diffuser_input_mode, use_conditioning=self.use_cond, cond_fill_value=self.cond_fill_val, diffuser_norm_info=algo_config.diffuser_norm_info, agent_hist_norm_info=algo_config.agent_hist_norm_info, neighbor_hist_norm_info=algo_config.neighbor_hist_norm_info, dt=algo_config.step_time, ) # set up initial guidance if guidance_config is not None: self.set_guidance(guidance_config) # set up EMA self.use_ema = algo_config.use_ema if self.use_ema: print('DIFFUSER: using EMA... 
val and get_action will use ema model') self.ema = EMA(algo_config.ema_decay) self.ema_policy = copy.deepcopy(self.nets["policy"]) self.ema_policy.requires_grad_(False) self.ema_update_every = algo_config.ema_step self.ema_start_step = algo_config.ema_start_step self.reset_parameters() self.cur_train_step = 0 @property def checkpoint_monitor_keys(self): if self.use_ema: return {"valLoss": "val/ema_losses_diffusion_loss"} else: return {"valLoss": "val/losses_diffusion_loss"} def forward(self, obs_dict, num_samp=1, class_free_guide_w=0.0, guide_as_filter_only=False, guide_clean=False): cur_policy = self.nets["policy"] # this function is only called at validation time, so use ema if self.use_ema: cur_policy = self.ema_policy return cur_policy(obs_dict, num_samp, return_diffusion=True, return_guidance_losses=True, class_free_guide_w=class_free_guide_w, apply_guidance=(not guide_as_filter_only), guide_clean=guide_clean)["predictions"] def _compute_metrics(self, pred_batch, data_batch): metrics = {} predictions = pred_batch["predictions"] preds = TensorUtils.to_numpy(predictions["positions"]) gt = TensorUtils.to_numpy(data_batch["target_positions"]) avail = TensorUtils.to_numpy(data_batch["target_availabilities"]) # compute ADE & FDE based on trajectory samples sample_preds = preds conf = np.ones(sample_preds.shape[0:2]) / float(sample_preds.shape[1]) metrics["ego_avg_ADE"] = Metrics.batch_average_displacement_error(gt, sample_preds, conf, avail, "mean").mean() metrics["ego_min_ADE"] = Metrics.batch_average_displacement_error(gt, sample_preds, conf, avail, "oracle").mean() metrics["ego_avg_FDE"] = Metrics.batch_final_displacement_error(gt, sample_preds, conf, avail, "mean").mean() metrics["ego_min_FDE"] = Metrics.batch_final_displacement_error(gt, sample_preds, conf, avail, "oracle").mean() # compute diversity scores based on trajectory samples metrics["ego_avg_APD"] = Metrics.batch_average_diversity(gt, sample_preds, conf, avail, "mean").mean() metrics["ego_max_APD"] = Metrics.batch_average_diversity(gt, sample_preds, conf, avail, "max").mean() metrics["ego_avg_FPD"] = Metrics.batch_final_diversity(gt, sample_preds, conf, avail, "mean").mean() metrics["ego_max_FPD"] = Metrics.batch_final_diversity(gt, sample_preds, conf, avail, "max").mean() return metrics def reset_parameters(self): self.ema_policy.load_state_dict(self.nets["policy"].state_dict()) def step_ema(self, step): if step < self.ema_start_step: self.reset_parameters() return self.ema.update_model_average(self.ema_policy, self.nets["policy"]) def training_step_end(self, batch_parts): self.cur_train_step += 1 def training_step(self, batch, batch_idx): """ Training on a single batch of data. Args: batch (dict): dictionary with torch.Tensors sampled from a data loader and filtered by @process_batch_for_training batch_idx (int): training step number (relative to the CURRENT epoch) - required by some Algos that need to perform staged training and early stopping Returns: info (dict): dictionary of relevant inputs, outputs, and losses that might be relevant for logging """ if self.use_ema and self.cur_train_step % self.ema_update_every == 0: self.step_ema(self.cur_train_step)
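_compute_metrics in the all_code above reports "mean" and "oracle" ADE/FDE over N sampled trajectories. A rough numpy sketch of the oracle (min-over-samples) variant with an availability mask; the shapes follow the B x N x T x 2 convention of the predictions, but the tbsim Metrics helpers themselves are not reproduced here:

import numpy as np

def min_ade_fde(gt, samples, avail):
    # gt: (B, T, 2), samples: (B, N, T, 2), avail: (B, T) boolean availability mask
    err = np.linalg.norm(samples - gt[:, None], axis=-1)     # (B, N, T) displacement error
    err = np.where(avail[:, None], err, np.nan)              # mask out missing GT steps
    min_ade = np.nanmin(np.nanmean(err, axis=-1), axis=-1)   # best sample per agent
    min_fde = np.nanmin(err[..., -1], axis=-1)               # final-step error of best sample
    return min_ade, min_fde

B, N, T = 2, 4, 10
gt, samples = np.random.randn(B, T, 2), np.random.randn(B, N, T, 2)
avail = np.ones((B, T), dtype=bool)
print(min_ade_fde(gt, samples, avail))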
batch = batch_utils().parse_batch(batch)
0
2023-10-31 18:43:07+00:00
16k
nv-tlabs/pacer
uhc/smpllib/np_smpl_humanoid_batch.py
[ { "identifier": "dict_to_torch", "path": "uhc/utils/torch_ext.py", "snippet": "def dict_to_torch(input_dict, dtype = None, device = None, add_dim = False):\n if not isinstance(input_dict, dict):\n return None\n out_dict = {}\n for key, value in input_dict.items():\n if isinstance(value, np.ndarray):\n value = torch.from_numpy(value)\n else:\n pass\n\n if torch.is_tensor(value):\n if dtype is not None:\n value = value.type(dtype)\n if device is not None:\n value = value.to(device)\n if add_dim:\n value = value[None, ]\n\n out_dict[key] = value\n\n return out_dict" }, { "identifier": "SMPLConverter", "path": "uhc/smpllib/smpl_mujoco.py", "snippet": "class SMPLConverter:\nclass SMPL_M_Renderer(object):\nclass SMPL_M_Viewer(object):\n def __init__(self, model, new_model, smpl_model=\"smpl\"):\n def qpos_smpl_2_new(self, qpos):\n def qvel_smpl_2_new(self, qpvel):\n def qpos_new_2_smpl(self, qpos):\n def qvel_new_2_smpl(self, qvel):\n def jpos_new_2_smpl(self, jpos):\n def get_new_qpos_lim(self):\n def get_new_qvel_lim(self):\n def get_new_body_lim(self):\n def get_new_diff_weight(self):\n def get_new_jkp(self):\n def get_new_jkd(self):\n def get_new_a_scale(self):\n def get_new_torque_limit(self):\n def __init__(\n self,\n model_file=\"/hdd/zen/dev/copycat/Copycat/assets/mujoco_models/humanoid_smpl_neutral_mesh.xml\",\n render_size=(960, 480),\n ):\n def render_smpl(\n self,\n body_pose,\n tran=None,\n output_name=None,\n size=(960, 480),\n frame_rate=30,\n add_text=None,\n offset_z=0,\n ):\n def render_qpose_and_write(\n self,\n qpos,\n output_name=None,\n size=(960, 480),\n frame_rate=30,\n add_text=None,\n offset_z=0,\n follow=False,\n ):\n def render_qpose(\n self,\n qpose,\n size=(960, 480),\n frame_rate=30,\n add_text=None,\n offset_z=0,\n follow=False,\n ):\n def show_pose(self, size=(960, 480), loop=False):\n def set_smpl_pose(self, pose, tran=None, offset_z=0):\n def set_smpl_pose_6d(self, full_pose, tran=None, offset_z=0):\n def set_qpose(self, qpose):\n def show_pose_thread(self, return_img=False):\n def __init__(\n self,\n model_file=\"/hdd/zen/dev/copycat/Copycat/assets/mujoco_models/humanoid_smpl_neutral_mesh.xml\",\n render_size=(960, 480),\n ):\n def render_qpose(self, qpose, follow=False):\n def show_pose(self, return_img=False, size=(1920, 1080), loop=False):\n def show_pose_in_thread(self, return_img=False, size=(1920, 1080)):\n def show_pose_thread(self, return_img=False):\n def set_smpl_pose(self, pose, trans=None, offset_z=0):\n def set_smpl_pose_6d(self, full_pose, offset_z=0):\n def set_qpose(self, qpose):\ndef smplh_to_smpl(pose):\ndef smpl_to_smplh(pose):\ndef smpl_to_qpose(\n pose,\n mj_model,\n trans=None,\n normalize=False,\n random_root=False,\n count_offset=True,\n use_quat=False,\n euler_order=\"ZYX\",\n model=\"smpl\",\n):\ndef smpl_to_qpose_multi(\n pose,\n offset,\n mujoco_body_order,\n num_people=1,\n trans=None,\n normalize=False,\n random_root=False,\n count_offset=True,\n use_quat=False,\n euler_order=\"ZYX\",\n model=\"smpl\",\n):\ndef smpl_to_qpose_torch(\n pose,\n mj_model,\n trans=None,\n normalize=False,\n random_root=False,\n count_offset=True,\n use_quat=False,\n euler_order=\"ZYX\",\n model=\"smpl\",\n):\ndef qpos_to_smpl(qpos, mj_model, smpl_model=\"smpl\"):\ndef qpos_to_smpl_torch(qpos, mj_model, smpl_model=\"smpl\"):\ndef smpl_6d_to_qpose(full_pose, model, normalize=False):\ndef normalize_smpl_pose(pose_aa, trans=None, random_root=False):" }, { "identifier": "SMPL_EE_NAMES", "path": "uhc/smpllib/smpl_parser.py", "snippet": "SMPL_EE_NAMES = 
[\"L_Ankle\", \"R_Ankle\", \"L_Wrist\", \"R_Wrist\", \"Head\"]" }, { "identifier": "get_expert", "path": "uhc/utils/tools.py", "snippet": "def get_expert(expert_qpos, expert_meta, env):\n old_state = env.sim.get_state()\n expert = defaultdict(list)\n expert[\"qpos\"] = expert_qpos\n expert[\"meta\"] = expert_meta\n feat_keys = {\n \"qvel\",\n \"rlinv\",\n \"rlinv_local\",\n \"rangv\",\n \"rq_rmh\",\n \"com\",\n \"body_com\",\n \"head_pose\",\n \"ee_pos\",\n \"ee_wpos\",\n \"bquat\",\n \"bangvel\",\n \"wbpos\",\n \"wbquat\",\n }\n\n for i in range(expert_qpos.shape[0]):\n qpos = expert_qpos[i]\n env.data.qpos[:76] = qpos\n env.sim.forward()\n rq_rmh = de_heading(qpos[3:7])\n ee_pos = env.get_ee_pos(env.cc_cfg.obs_coord)\n wbpos = env.get_wbody_pos()\n wbquat = env.get_wbody_quat()\n\n ee_wpos = env.get_ee_pos(None)\n bquat = env.get_body_quat() # current pose (body) in quaternion\n com = env.get_com().copy()\n head_pose = env.get_head().copy()\n body_com = env.get_body_com()\n\n if i > 0:\n prev_qpos = expert_qpos[i - 1]\n qvel = get_qvel_fd_new(prev_qpos, qpos, env.dt)\n qvel = qvel.clip(-10.0, 10.0)\n rlinv = qvel[:3].copy()\n rlinv_local = transform_vec(\n qvel[:3].copy(), qpos[3:7], env.cc_cfg.obs_coord\n )\n rangv = qvel[3:6].copy()\n expert[\"qvel\"].append(qvel)\n expert[\"rlinv\"].append(rlinv)\n expert[\"rlinv_local\"].append(rlinv_local)\n expert[\"rangv\"].append(rangv)\n\n expert[\"wbquat\"].append(wbquat)\n expert[\"wbpos\"].append(wbpos)\n expert[\"ee_pos\"].append(ee_pos)\n expert[\"ee_wpos\"].append(ee_wpos)\n expert[\"bquat\"].append(bquat)\n expert[\"com\"].append(com)\n expert[\"body_com\"].append(body_com)\n expert[\"head_pose\"].append(head_pose)\n expert[\"rq_rmh\"].append(rq_rmh)\n\n expert[\"qvel\"].insert(0, expert[\"qvel\"][0].copy())\n expert[\"rlinv\"].insert(0, expert[\"rlinv\"][0].copy())\n expert[\"rlinv_local\"].insert(0, expert[\"rlinv_local\"][0].copy())\n expert[\"rangv\"].insert(0, expert[\"rangv\"][0].copy())\n # get expert body quaternions\n for i in range(1, expert_qpos.shape[0]):\n bangvel = get_angvel_fd(expert[\"bquat\"][i - 1], expert[\"bquat\"][i], env.dt)\n expert[\"bangvel\"].append(bangvel)\n expert[\"bangvel\"].insert(0, expert[\"bangvel\"][0].copy())\n\n for key in feat_keys:\n expert[key] = np.vstack(expert[key])\n\n expert[\"len\"] = expert[\"qpos\"].shape[0]\n expert[\"height_lb\"] = expert[\"qpos\"][:, 2].min()\n expert[\"head_height_lb\"] = expert[\"head_pose\"][:, 2].min()\n if expert_meta[\"cyclic\"]:\n expert[\"init_heading\"] = get_heading_q(expert_qpos[0, 3:7])\n expert[\"init_pos\"] = expert_qpos[0, :3].copy()\n env.sim.set_state(old_state)\n env.sim.forward()\n return expert" }, { "identifier": "get_expert_master", "path": "uhc/utils/tools.py", "snippet": "def get_expert_master(expert_qpos, expert_meta, env):\n old_state = env.sim.get_state()\n expert = defaultdict(list)\n expert_qpos = env.converter.qpos_smpl_2_new(expert_qpos)\n expert[\"qpos\"] = expert_qpos\n expert[\"meta\"] = expert_meta\n feat_keys = {\n \"qvel\",\n \"rlinv\",\n \"rlinv_local\",\n \"rangv\",\n \"rq_rmh\",\n \"com\",\n \"body_com\",\n \"head_pose\",\n \"ee_pos\",\n \"ee_wpos\",\n \"bquat\",\n \"bangvel\",\n \"wbpos\",\n \"wbquat\",\n }\n for i in range(expert_qpos.shape[0]):\n qpos = expert_qpos[i]\n env.data.qpos[: env.qpos_lim] = qpos\n env.sim.forward()\n rq_rmh = de_heading(qpos[3:7])\n ee_pos = env.get_ee_pos(env.cc_cfg.obs_coord)\n wbpos = env.get_wbody_pos()\n wbquat = env.get_wbody_quat()\n\n ee_wpos = env.get_ee_pos(None)\n bquat = 
env.get_body_quat() # current pose (body) in quaternion\n com = env.get_com()\n head_pose = env.get_head().copy()\n body_com = env.get_body_com()\n\n if i > 0:\n prev_qpos = expert_qpos[i - 1]\n qvel = get_qvel_fd_new(prev_qpos, qpos, env.dt)\n qvel = qvel.clip(-10.0, 10.0)\n rlinv = qvel[:3].copy()\n rlinv_local = transform_vec(\n qvel[:3].copy(), qpos[3:7], env.cc_cfg.obs_coord\n )\n rangv = qvel[3:6].copy()\n expert[\"qvel\"].append(qvel)\n expert[\"rlinv\"].append(rlinv)\n expert[\"rlinv_local\"].append(rlinv_local)\n expert[\"rangv\"].append(rangv)\n\n expert[\"wbquat\"].append(wbquat)\n expert[\"wbpos\"].append(wbpos)\n expert[\"ee_pos\"].append(ee_pos)\n expert[\"ee_wpos\"].append(ee_wpos)\n expert[\"bquat\"].append(bquat)\n expert[\"com\"].append(com)\n expert[\"body_com\"].append(body_com)\n expert[\"head_pose\"].append(head_pose)\n expert[\"rq_rmh\"].append(rq_rmh)\n\n expert[\"qvel\"].insert(0, expert[\"qvel\"][0].copy())\n expert[\"rlinv\"].insert(0, expert[\"rlinv\"][0].copy())\n expert[\"rlinv_local\"].insert(0, expert[\"rlinv_local\"][0].copy())\n expert[\"rangv\"].insert(0, expert[\"rangv\"][0].copy())\n # get expert body quaternions\n for i in range(1, expert_qpos.shape[0]):\n bangvel = get_angvel_fd(expert[\"bquat\"][i - 1], expert[\"bquat\"][i], env.dt)\n expert[\"bangvel\"].append(bangvel)\n expert[\"bangvel\"].insert(0, expert[\"bangvel\"][0].copy())\n\n for key in feat_keys:\n expert[key] = np.vstack(expert[key])\n\n expert[\"len\"] = expert[\"qpos\"].shape[0]\n expert[\"height_lb\"] = expert[\"qpos\"][:, 2].min()\n expert[\"head_height_lb\"] = expert[\"head_pose\"][:, 2].min()\n if expert_meta[\"cyclic\"]:\n expert[\"init_heading\"] = get_heading_q(expert_qpos[0, 3:7])\n expert[\"init_pos\"] = expert_qpos[0, :3].copy()\n env.sim.set_state(old_state)\n env.sim.forward()\n return expert" }, { "identifier": "SMPL_Parser", "path": "uhc/smpllib/smpl_parser.py", "snippet": "class SMPL_Parser(_SMPL):\n def __init__(self, create_transl=False, *args, **kwargs):\n \"\"\"SMPL model constructor\n Parameters\n ----------\n model_path: str\n The path to the folder or to the file where the model\n parameters are stored\n data_struct: Strct\n A struct object. If given, then the parameters of the model are\n read from the object. Otherwise, the model tries to read the\n parameters from the given `model_path`. (default = None)\n create_global_orient: bool, optional\n Flag for creating a member variable for the global orientation\n of the body. (default = True)\n global_orient: torch.tensor, optional, Bx3\n The default value for the global orientation variable.\n (default = None)\n create_body_pose: bool, optional\n Flag for creating a member variable for the pose of the body.\n (default = True)\n body_pose: torch.tensor, optional, Bx(Body Joints * 3)\n The default value for the body pose variable.\n (default = None)\n create_betas: bool, optional\n Flag for creating a member variable for the shape space\n (default = True).\n betas: torch.tensor, optional, Bx10\n The default value for the shape member variable.\n (default = None)\n create_transl: bool, optional\n Flag for creating a member variable for the translation\n of the body. (default = True)\n transl: torch.tensor, optional, Bx3\n The default value for the transl variable.\n (default = None)\n dtype: torch.dtype, optional\n The data type for the created variables\n batch_size: int, optional\n The batch size used for creating the member variables\n joint_mapper: object, optional\n An object that re-maps the joints. 
Useful if one wants to\n re-order the SMPL joints to some other convention (e.g. MSCOCO)\n (default = None)\n gender: str, optional\n Which gender to load\n vertex_ids: dict, optional\n A dictionary containing the indices of the extra vertices that\n will be selected\n \"\"\"\n super(SMPL_Parser, self).__init__(*args, **kwargs)\n self.device = next(self.parameters()).device\n self.joint_names = SMPL_BONE_ORDER_NAMES\n\n self.joint_axes = {x: np.identity(3) for x in self.joint_names}\n self.joint_dofs = {x: [\"x\", \"y\", \"z\"] for x in self.joint_names}\n self.joint_range = {\n x: np.hstack([np.ones([3, 1]) * -np.pi,\n np.ones([3, 1]) * np.pi])\n for x in self.joint_names\n }\n self.joint_range[\"L_Elbow\"] *= 4\n self.joint_range[\"R_Elbow\"] *= 4\n self.joint_range[\"L_Shoulder\"] *= 4\n self.joint_range[\"R_Shoulder\"] *= 4\n\n self.contype = {1: self.joint_names}\n self.conaffinity = {1: self.joint_names}\n\n # self.contype = {\n # 3: ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee','R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Neck', 'Head','L_Thorax', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Elbow', 'R_Wrist', 'R_Hand'],\n # 1: ['Chest', \"L_Shoulder\", \"R_Shoulder\"]\n # }\n\n # self.conaffinity = {\n # 1: ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee','R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Neck', 'Head','L_Thorax', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Elbow', 'R_Wrist', 'R_Hand'],\n # 3: ['Chest', \"L_Shoulder\", \"R_Shoulder\"]\n # }\n\n self.zero_pose = torch.zeros(1, 72).float()\n\n def forward(self, *args, **kwargs):\n smpl_output = super(SMPL_Parser, self).forward(*args, **kwargs)\n return smpl_output\n\n def get_joints_verts(self, pose, th_betas=None, th_trans=None):\n \"\"\"\n Pose should be batch_size x 72\n \"\"\"\n if pose.shape[1] != 72:\n pose = pose.reshape(-1, 72)\n\n pose = pose.float()\n if th_betas is not None:\n th_betas = th_betas.float()\n\n if th_betas.shape[-1] == 16:\n th_betas = th_betas[:, :10]\n\n batch_size = pose.shape[0]\n\n smpl_output = self.forward(\n betas=th_betas,\n transl=th_trans,\n body_pose=pose[:, 3:],\n global_orient=pose[:, :3],\n )\n vertices = smpl_output.vertices\n joints = smpl_output.joints[:, :24]\n # joints = smpl_output.joints[:,JOINST_TO_USE]\n return vertices, joints\n\n def get_offsets(self, zero_pose=None, betas=torch.zeros(1, 10).float()):\n with torch.no_grad():\n if zero_pose is None:\n verts, Jtr = self.get_joints_verts(self.zero_pose,\n th_betas=betas)\n else:\n verts, Jtr = self.get_joints_verts(zero_pose, th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n jts_np = Jtr.detach().cpu().numpy()\n parents = self.parents.cpu().numpy()\n offsets_smpl = [np.array([0, 0, 0])]\n for i in range(1, len(parents)):\n p_id = parents[i]\n p3d = jts_np[0, p_id]\n curr_3d = jts_np[0, i]\n offset_curr = curr_3d - p3d\n offsets_smpl.append(offset_curr)\n offsets_smpl = np.array(offsets_smpl)\n joint_names = self.joint_names\n joint_pos = Jtr[0].numpy()\n smpl_joint_parents = self.parents.cpu().numpy()\n joint_offsets = {\n joint_names[c]:\n (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c]\n for c, p in enumerate(smpl_joint_parents)\n }\n parents_dict = {\n joint_names[i]: joint_names[parents[i]]\n for i in range(len(joint_names))\n }\n channels = [\"z\", \"y\", \"x\"]\n skin_weights = self.lbs_weights.numpy()\n return (verts[0], jts_np[0], skin_weights, self.joint_names,\n joint_offsets, parents_dict, channels, self.joint_range)\n\n def get_mesh_offsets(self,\n 
zero_pose=None,\n betas=torch.zeros(1, 10),\n flatfoot=False):\n with torch.no_grad():\n joint_names = self.joint_names\n if zero_pose is None:\n verts, Jtr = self.get_joints_verts(self.zero_pose,\n th_betas=betas)\n else:\n verts, Jtr = self.get_joints_verts(zero_pose, th_betas=betas)\n\n verts_np = verts.detach().cpu().numpy()\n verts = verts_np[0]\n\n if flatfoot:\n feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01\n verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])\n\n smpl_joint_parents = self.parents.cpu().numpy()\n\n joint_pos = Jtr[0].numpy()\n joint_offsets = {\n joint_names[c]:\n (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c]\n for c, p in enumerate(smpl_joint_parents)\n }\n joint_parents = {\n x: joint_names[i] if i >= 0 else None\n for x, i in zip(joint_names, smpl_joint_parents)\n }\n\n # skin_weights = smpl_layer.th_weights.numpy()\n skin_weights = self.lbs_weights.numpy()\n return (\n verts,\n joint_pos,\n skin_weights,\n joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )\n\n def get_mesh_offsets_batch(self, betas=torch.zeros(1, 10), flatfoot=False):\n with torch.no_grad():\n joint_names = self.joint_names\n verts, Jtr = self.get_joints_verts(self.zero_pose.repeat(\n betas.shape[0], 1),\n th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n verts = verts_np[0]\n\n if flatfoot:\n feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01\n verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])\n\n smpl_joint_parents = self.parents.cpu().numpy()\n\n joint_pos = Jtr\n joint_offsets = {\n joint_names[c]:\n (joint_pos[:, c] - joint_pos[:, p]) if c > 0 else joint_pos[:,\n c]\n for c, p in enumerate(smpl_joint_parents)\n }\n joint_parents = {\n x: joint_names[i] if i >= 0 else None\n for x, i in zip(joint_names, smpl_joint_parents)\n }\n\n skin_weights = self.lbs_weights\n return (\n verts,\n joint_pos,\n skin_weights,\n joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )" }, { "identifier": "SMPLH_Parser", "path": "uhc/smpllib/smpl_parser.py", "snippet": "class SMPLH_Parser(_SMPLH):\n def __init__(self, *args, **kwargs):\n super(SMPLH_Parser, self).__init__(*args, **kwargs)\n self.device = next(self.parameters()).device\n self.joint_names = SMPLH_BONE_ORDER_NAMES\n self.joint_axes = {x: np.identity(3) for x in self.joint_names}\n self.joint_dofs = {x: [\"z\", \"y\", \"x\"] for x in self.joint_names}\n self.joint_range = {\n x: np.hstack([np.ones([3, 1]) * -np.pi,\n np.ones([3, 1]) * np.pi])\n for x in self.joint_names\n }\n self.joint_range[\"L_Elbow\"] *= 4\n self.joint_range[\"R_Elbow\"] *= 4\n # import ipdb\n # ipdb.set_trace()\n\n self.contype = {1: self.joint_names}\n self.conaffinity = {1: self.joint_names}\n self.zero_pose = torch.zeros(1, 156).float()\n\n def forward(self, *args, **kwargs):\n smpl_output = super(SMPLH_Parser, self).forward(*args, **kwargs)\n return smpl_output\n\n def get_joints_verts(self, pose, th_betas=None, th_trans=None):\n \"\"\"\n Pose should be batch_size x 156\n \"\"\"\n\n if pose.shape[1] != 156:\n pose = pose.reshape(-1, 156)\n pose = pose.float()\n if th_betas is not None:\n th_betas = th_betas.float()\n\n batch_size = pose.shape[0]\n smpl_output = self.forward(\n body_pose=pose[:, 3:66],\n global_orient=pose[:, :3],\n L_hand_pose=pose[:, 66:111],\n R_hand_pose=pose[:, 111:156],\n betas=th_betas,\n transl=th_trans,\n )\n vertices = 
smpl_output.vertices\n joints = smpl_output.joints\n # joints = smpl_output.joints[:,JOINST_TO_USE]\n return vertices, joints\n\n def get_offsets(self, betas=torch.zeros(1, 16).float()):\n with torch.no_grad():\n verts, jts = self.get_joints_verts(self.zero_pose, th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n jts_np = jts.detach().cpu().numpy()\n\n parents = self.parents.cpu().numpy()\n offsets_smpl = [np.array([0, 0, 0])]\n for i in range(1, len(parents)):\n p_id = parents[i]\n p3d = jts_np[0, p_id]\n curr_3d = jts_np[0, i]\n offset_curr = curr_3d - p3d\n offsets_smpl.append(offset_curr)\n offsets_smpl = np.array(offsets_smpl)\n names_smpl = self.joint_names\n offset_smpl_dict = {\n names_smpl[i]: offsets_smpl[i]\n for i in range(len(names_smpl))\n }\n parents_dict = {\n names_smpl[i]: names_smpl[parents[i]]\n for i in range(len(names_smpl))\n }\n parents_dict[\"Hips\"] = \"None\"\n channels = [\"z\", \"y\", \"x\"]\n\n return offset_smpl_dict, parents_dict, channels\n\n def get_mesh_offsets(self, betas=torch.zeros(1, 16), flatfoot=False):\n with torch.no_grad():\n joint_names = self.joint_names\n verts, Jtr = self.get_joints_verts(self.zero_pose, th_betas=betas)\n\n verts_np = verts.detach().cpu().numpy()\n verts = verts_np[0]\n\n if flatfoot:\n feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01\n verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])\n\n smpl_joint_parents = self.parents.cpu().numpy()\n joint_pos = Jtr[0].numpy()\n joint_offsets = {\n joint_names[c]:\n (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c]\n for c, p in enumerate(smpl_joint_parents)\n }\n joint_parents = {\n x: joint_names[i] if i >= 0 else None\n for x, i in zip(joint_names, smpl_joint_parents)\n }\n\n # skin_weights = smpl_layer.th_weights.numpy()\n skin_weights = self.lbs_weights.numpy()\n return (\n verts,\n joint_pos,\n skin_weights,\n joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )" }, { "identifier": "SMPLX_Parser", "path": "uhc/smpllib/smpl_parser.py", "snippet": "class SMPLX_Parser(_SMPLX):\n def __init__(self, *args, **kwargs):\n super(SMPLX_Parser, self).__init__(*args, **kwargs)\n self.device = next(self.parameters()).device\n self.joint_names = SMPLH_BONE_ORDER_NAMES\n self.joint_axes = {x: np.identity(3) for x in self.joint_names}\n self.joint_dofs = {x: [\"z\", \"y\", \"x\"] for x in self.joint_names}\n self.joint_range = {\n x: np.hstack([np.ones([3, 1]) * -np.pi,\n np.ones([3, 1]) * np.pi])\n for x in self.joint_names\n }\n self.joint_range[\"L_Elbow\"] *= 4\n self.joint_range[\"R_Elbow\"] *= 4\n # import ipdb\n # ipdb.set_trace()\n\n self.contype = {1: self.joint_names}\n self.conaffinity = {1: self.joint_names}\n self.zero_pose = torch.zeros(1, 156).float()\n self.joint_to_use = [\n SMPLX_BONE_ORDER_NAMES.index(i) for i in SMPLH_BONE_ORDER_NAMES\n ]\n self.parents_to_use = np.concatenate(\n [np.arange(0, 22), np.arange(25, 55)])\n\n def forward(self, *args, **kwargs):\n smpl_output = super(SMPLX_Parser, self).forward(*args, **kwargs)\n return smpl_output\n\n def get_joints_verts(self, pose, th_betas=None, th_trans=None):\n \"\"\"\n Pose should be batch_size x 156\n \"\"\"\n\n if pose.shape[1] != 156:\n pose = pose.reshape(-1, 156)\n pose = pose.float()\n if th_betas is not None:\n th_betas = th_betas.float()\n\n batch_size = pose.shape[0]\n smpl_output = self.forward(\n body_pose=pose[:, 3:66],\n global_orient=pose[:, :3],\n left_hand_pose=pose[:, 66:111],\n 
right_hand_pose=pose[:, 111:156],\n betas=th_betas,\n transl=th_trans,\n )\n vertices = smpl_output.vertices\n joints = smpl_output.joints\n # return vertices, joints\n return vertices, joints\n\n def get_offsets(self, v_template=None):\n if not v_template is None:\n self.v_template = v_template\n with torch.no_grad():\n verts, jts = self.get_joints_verts(self.zero_pose)\n verts_np = verts.detach().cpu().numpy()\n jts_np = jts.detach().cpu().numpy()\n\n parents = self.parents.cpu().numpy()\n offsets_smpl = [np.array([0, 0, 0])]\n for i in range(1, len(parents)):\n p_id = parents[i]\n p3d = jts_np[0, p_id]\n curr_3d = jts_np[0, i]\n offset_curr = curr_3d - p3d\n offsets_smpl.append(offset_curr)\n offsets_smpl = np.array(offsets_smpl)\n names_smpl = self.joint_names\n offset_smpl_dict = {\n names_smpl[i]: offsets_smpl[i]\n for i in range(len(names_smpl))\n }\n parents_dict = {\n names_smpl[i]: names_smpl[parents[i]]\n for i in range(len(names_smpl))\n }\n parents_dict[\"Hips\"] = \"None\"\n channels = [\"z\", \"y\", \"x\"]\n return offset_smpl_dict, parents_dict, channels\n\n def get_mesh_offsets(self, v_template=None):\n if not v_template is None:\n self.v_template = v_template\n with torch.no_grad():\n # joint_names = self.joint_names\n joint_names = SMPLX_BONE_ORDER_NAMES\n verts, Jtr = self.get_joints_verts(self.zero_pose)\n\n smpl_joint_parents = self.parents.cpu().numpy()\n joint_pos = Jtr[0].numpy()\n # print(\n # joint_pos.shape,\n # smpl_joint_parents.shape,\n # len(self.parents_to_use),\n # self.parents.cpu().numpy().shape,\n # )\n joint_offsets = {\n joint_names[c]:\n (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c]\n for c, p in enumerate(smpl_joint_parents)\n if joint_names[c] in self.joint_names\n }\n joint_parents = {\n x: joint_names[i] if i >= 0 else None\n for x, i in zip(joint_names, smpl_joint_parents)\n if joint_names[i] in self.joint_names\n }\n\n verts = verts[0].numpy()\n # skin_weights = smpl_layer.th_weights.numpy()\n skin_weights = self.lbs_weights.numpy()[:, self.parents_to_use]\n return (\n verts,\n joint_pos,\n skin_weights,\n self.joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )" } ]
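get_offsets / get_mesh_offsets in the SMPL parser context compute each joint's offset as its rest-pose position minus its parent's position. A tiny numpy sketch of that parent-relative offset computation on a made-up 4-joint chain (the positions and parent indices below are purely illustrative, not SMPL's):

import numpy as np

def joint_offsets(joint_pos, parents):
    # joint_pos: (J, 3) rest-pose joint positions; parents[i] = parent index, -1 for the root
    offsets = np.zeros_like(joint_pos)
    for i, p in enumerate(parents):
        offsets[i] = joint_pos[i] if p < 0 else joint_pos[i] - joint_pos[p]
    return offsets

# toy kinematic chain: root -> hip -> knee -> ankle
joint_pos = np.array([[0.0, 0.0, 1.0],
                      [0.1, 0.0, 0.9],
                      [0.1, 0.0, 0.5],
                      [0.1, 0.0, 0.1]])
parents = [-1, 0, 1, 2]
print(joint_offsets(joint_pos, parents))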
import torch import glob import os import sys import pdb import os.path as osp import joblib import pytorch3d.transforms as tR import autograd.numpy as np import time import ipdb from uhc.utils.torch_ext import dict_to_torch from uhc.utils.torch_utils import * from uhc.utils.transform_utils import * from scipy.spatial.transform import Rotation as sRot from uhc.smpllib.smpl_mujoco import SMPLConverter, smpl_to_qpose, smpl_to_qpose_torch, SMPL_BONE_ORDER_NAMES from uhc.smpllib.smpl_parser import SMPL_EE_NAMES from uhc.utils.tools import get_expert, get_expert_master from uhc.smpllib.smpl_parser import ( SMPL_Parser, SMPLH_Parser, SMPLX_Parser, ) from autograd import elementwise_grad as egrad from uhc.smpllib.smpl_robot import Robot from uhc.smpllib.torch_smpl_humanoid import Humanoid from uhc.utils.config_utils.copycat_config import Config from uhc.data_loaders.dataset_amass_single import DatasetAMASSSingle from uhc.utils.torch_ext import dict_to_torch from uhc.smpllib.smpl_mujoco import smpl_to_qpose_torch, smplh_to_smpl
11852
pred_joints2d = pred_joints3d @ (self.K.T) z = pred_joints2d[:, :, 2:] pred_joints2d = pred_joints2d[:, :, :2] / z pred_joints2d = smpl_op_to_op(pred_joints2d) if return_cam_3d: return pred_joints2d, pred_joints3d else: return pred_joints2d def proj_2d_line_loss(self, input_vec): wbpos = self.fk_batch_grad(input_vec) _, pred_joints3d = self.proj2d(wbpos, return_cam_3d=True) dist = np.cross(pred_joints3d[0], pred_joints3d[0] - self.camera_rays)**2 return dist.mean() def proj_2d_loss(self, input_vec, ord=2, normalize = True): wbpos = self.fk_batch_grad(input_vec) pred_joints2d = self.proj2d(wbpos) curr_weighting = np.array(self.weighting) if normalize: pred_joints2d = normalize_screen_coordinates(pred_joints2d, self.img_w, self.img_h) gt_2d_joints = self.gt_2d_joints_norm else: gt_2d_joints = self.gt_2d_joints if ord == 1: loss = np.abs( gt_2d_joints[self.inliers] - pred_joints2d.squeeze()[self.inliers]).squeeze().mean() else: diff = (gt_2d_joints - pred_joints2d.squeeze())**2 curr_weighting[~self.inliers] = 0 loss = (diff * curr_weighting).sum(axis=0).mean() return loss def proj_2d_body_loss(self, input_vec, ord=2, normalize = False): # Has to use the current translation (to roughly put at the same position, and then zero out the translation) wbpos = self.fk_batch_grad(input_vec) pred_joints2d = self.proj2d(wbpos) gt2d_center = self.gt_2d_joints[..., 7:8, :].copy() pred_joints2d += (gt2d_center - pred_joints2d[..., 7:8, :]) curr_weighting = np.array(self.weighting) if normalize: pred_joints2d = normalize_screen_coordinates(pred_joints2d, self.img_w, self.img_h) gt_2d_joints = self.gt_2d_joints_norm else: gt_2d_joints = self.gt_2d_joints if ord == 1: loss = np.abs(gt_2d_joints[self.inliers] - pred_joints2d.squeeze()[self.inliers]).squeeze().mean() else: diff = (gt_2d_joints - pred_joints2d.squeeze())**2 curr_weighting[~self.inliers] = 0 loss = (diff * curr_weighting).sum(axis=0).mean() return loss def proj_2d_root_loss(self, root_pos_rot): input_vec = np.concatenate( [root_pos_rot.reshape([1, 1, 6]), np.zeros([1, 1, 69])], axis=2) wbpos = self.fk_batch_grad(input_vec) pred_joints2d = self.proj2d(wbpos) return np.abs(self.gt_2d_joints[7:8] - pred_joints2d.squeeze()[7:8]).squeeze().mean() def fk_batch(self, pose, trans, convert_to_mat=True, count_offset=True): pose, trans = pose.cpu().numpy(), trans.cpu().numpy() B, seq_len = pose.shape[:2] if convert_to_mat: pose_mat = rodrigues(pose.reshape(B * seq_len * 24, 1, 3)).reshape( B, seq_len, -1, 3, 3) else: pose_mat = pose if pose_mat.shape != 5: pose_mat = pose_mat.reshape(B, seq_len, -1, 3, 3) J = pose_mat.shape[2] - 1 # Exclude root if count_offset: trans = trans + self._offsets[:, 0:1] pose_mat_ordered = pose_mat[:, :, self.smpl_index] wbody_pos, wbody_mat = self.forward_kinematics_batch( pose_mat_ordered[:, :, 1:], pose_mat_ordered[:, :, 0:1], trans) return_dic = {} return_dic["wbpos"] = wbody_pos return_dic["wbmat"] = wbody_mat return return_dic def fk_batch_grad(self, input_vec, count_offset=True): trans, pose = input_vec[:, :, :3], input_vec[:, :, 3:] B, seq_len = pose.shape[:2] pose_mat = rodrigues(pose.reshape(-1, 1, 3)).reshape(B, seq_len, -1, 3, 3) # pose_mat = [ # rodrigues_vec_to_rotation_mat(a) for a in pose.reshape(-1, 3) # ] # pose_mat = np.stack(pose_mat).reshape(B, seq_len, -1, 3, 3) J = pose_mat.shape[2] - 1 # Exclude root if count_offset: trans = trans + self._offsets[:, 0:1] pose_mat_ordered = pose_mat[:, :, self.smpl_index] wbody_pos, wbody_mat = self.forward_kinematics_batch( pose_mat_ordered[:, :, 1:], pose_mat_ordered[:, 
:, 0:1], trans) return wbody_pos def get_ee_pos(self, body_xpos, root_q, transform):
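proj2d in the cropped_code above projects 3D joints into the image with an extrinsic rotation/translation, the intrinsic matrix, and a perspective divide. A standalone numpy sketch of that chain, assuming arbitrary example values for R, t and K rather than the repo's calibrated cam_params:

import numpy as np

def project_points(points_3d, R, t, K):
    # points_3d: (N, 3) joints in the world/mujoco frame
    cam = points_3d @ R.T + t          # world -> camera frame
    pix = cam @ K.T                    # apply intrinsics
    return pix[:, :2] / pix[:, 2:]     # perspective divide by depth

R = np.eye(3)
t = np.array([0.0, 0.0, 5.0])          # push points in front of the camera
K = np.array([[1000.0, 0.0, 960.0],
              [0.0, 1000.0, 540.0],
              [0.0, 0.0, 1.0]])
points = np.random.randn(24, 3)
print(project_points(points, R, t, K).shape)  # (24, 2)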
# import numpy as np sys.path.append(os.getcwd()) def smpl_op_to_op(pred_joints2d): new_2d = np.concatenate([pred_joints2d[..., [1, 4], :].mean(axis = -2, keepdims = True), \ pred_joints2d[..., 1:7, :], \ pred_joints2d[..., [7, 8, 11], :].mean(axis = -2, keepdims = True), \ pred_joints2d[..., 9:11, :], \ pred_joints2d[..., 12:, :]], \ axis = -2) return new_2d def normalize_screen_coordinates(X, w=1920, h=1080): assert X.shape[-1] == 2 # Normalize so that [0, w] is mapped to # [-1, 1], while preserving the aspect ratio return X / w * 2 - np.array([1, h / w]) def rodrigues(r): """ Rodrigues' rotation formula that turns axis-angle vector into rotation matrix in a batch-ed manner. Parameter: ---------- r: Axis-angle rotation vector of shape [batch_size, 1, 3]. Return: ------- Rotation matrix of shape [batch_size, 3, 3]. """ theta = np.linalg.norm(r, axis=(1, 2))[:, None, None] # avoid zero divide theta = np.maximum(theta, np.finfo(r.dtype).eps) r_hat = r / theta cos = np.cos(theta) z_stick = np.zeros(theta.shape[0]) m = np.stack([ z_stick, -r_hat[:, 0, 2], r_hat[:, 0, 1], r_hat[:, 0, 2], z_stick, -r_hat[:, 0, 0], -r_hat[:, 0, 1], r_hat[:, 0, 0], z_stick ], axis=1).reshape([-1, 3, 3]) i_cube = np.broadcast_to(np.expand_dims(np.eye(3), axis=0), [theta.shape[0], 3, 3]) A = np.transpose(r_hat, axes=[0, 2, 1]) B = r_hat dot = np.matmul(A, B) R = cos * i_cube + (1 - cos) * dot + np.sin(theta) * m return R def rodrigues_vec_to_rotation_mat(rot): theta = np.linalg.norm(rot, axis=0) if theta < sys.float_info.epsilon: rotation_mat = np.eye(3, dtype=float) else: rot = rot / theta I = np.eye(3, dtype=float) r_rT = np.array([[rot[0] * rot[0], rot[0] * rot[1], rot[0] * rot[2]], [rot[1] * rot[0], rot[1] * rot[1], rot[1] * rot[2]], [rot[2] * rot[0], rot[2] * rot[1], rot[2] * rot[2]]]) r_cross = np.array([[0, -rot[2], rot[1]], [rot[2], 0, -rot[0]], [-rot[1], rot[0], 0]]) rotation_mat = np.cos(theta) * I + ( 1 - np.cos(theta)) * r_rT + np.sin(theta) * r_cross return rotation_mat class Humanoid_Batch: def __init__(self, smpl_model="smpl", data_dir="data/smpl"): self.smpl_model = smpl_model if self.smpl_model == "smpl": self.smpl_parser_n = SMPL_Parser(model_path=data_dir, gender="neutral") self.smpl_parser_m = SMPL_Parser(model_path=data_dir, gender="male") self.smpl_parser_f = SMPL_Parser(model_path=data_dir, gender="female") elif self.smpl_model == "smplh": self.smpl_parser_n = SMPLH_Parser( model_path=data_dir, gender="neutral", use_pca=False, create_transl=False, ) self.smpl_parser_m = SMPLH_Parser(model_path=data_dir, gender="male", use_pca=False, create_transl=False) self.smpl_parser_f = SMPLH_Parser(model_path=data_dir, gender="female", use_pca=False, create_transl=False) elif self.smpl_model == "smplx": self.smpl_parser_n = SMPLX_Parser( model_path=data_dir, gender="neutral", use_pca=False, create_transl=False, ) self.smpl_parser_m = SMPLX_Parser(model_path=data_dir, gender="male", use_pca=False, create_transl=False) self.smpl_parser_f = SMPLX_Parser(model_path=data_dir, gender="female", use_pca=False, create_transl=False) self.model_names = [ 'Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee', 'R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Chest', 'Neck', 'Head', 'L_Thorax', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'R_Hand' ] self._parents = [ -1, 0, 1, 2, 3, 0, 5, 6, 7, 0, 9, 10, 11, 12, 11, 14, 15, 16, 17, 11, 19, 20, 21, 22 ] self.smpl_index = [ SMPL_BONE_ORDER_NAMES.index(i) for i in self.model_names ] def update_model(self, betas, 
gender): betas, gender = betas.cpu().float(), gender.cpu().long() B, _ = betas.shape betas_f = betas[gender == 2] if len(betas_f) > 0: _, _, _, _, joint_offsets_f, _, _, _, _, _, _, = self.smpl_parser_f.get_mesh_offsets_batch( betas=betas_f[:, :10]) betas_n = betas[gender == 0] if len(betas_n) > 0: _, _, _, _, joint_offsets_n, _, _, _, _, _, _, = self.smpl_parser_n.get_mesh_offsets_batch( betas=betas_n[:, :10]) betas_m = betas[gender == 1] if len(betas_m) > 0: _, _, _, _, joint_offsets_m, _, _, _, _, _, _, = self.smpl_parser_m.get_mesh_offsets_batch( betas=betas_m[:, :10]) joint_offsets_all = dict() for n in SMPL_BONE_ORDER_NAMES: joint_offsets_all[n] = torch.zeros([B, 3]).float() if len(betas_f) > 0: joint_offsets_all[n][gender == 2] = joint_offsets_f[n] if len(betas_n) > 0: joint_offsets_all[n][gender == 0] = joint_offsets_n[n] if len(betas_m) > 0: joint_offsets_all[n][gender == 1] = joint_offsets_m[n] off_sets = [] for n in self.model_names: off_sets.append(joint_offsets_all[n]) # self._offsets = torch.from_numpy(np.stack(off_sets, axis=1)) self._offsets = np.round(np.stack(off_sets, axis=1), decimals=5) self.trans2joint = -self._offsets[:, 0:1] self.trans2joint[:, :, 2] = 0 # self._offsets = joblib.load("curr_offset.pkl")[None, ] def update_projection(self, cam_params, smpl2op_map, MUJOCO_2_SMPL): self.full_R = cam_params['full_R'] self.full_t = cam_params['full_t'] self.K = cam_params['K'] self.img_w = cam_params['img_w'] self.img_h = cam_params['img_h'] self.openpose_subindex = smpl2op_map < 22 self.smpl2op_map = smpl2op_map self.smpl2op_partial = self.smpl2op_map[self.openpose_subindex] self.MUJOCO_2_SMPL = MUJOCO_2_SMPL def update_tgt_joints(self, tgt_joints, inliers): self.gt_2d_joints = tgt_joints self.inliers = inliers.astype(bool) num_joints = self.gt_2d_joints.shape[-2] self.gt_2d_joints_norm = normalize_screen_coordinates(self.gt_2d_joints, self.img_w, self.img_h) self.num_frames = self.gt_2d_joints.shape[0] self.camera_rays = np.concatenate([self.gt_2d_joints, np.ones([self.num_frames, num_joints, 1])], axis=2).dot(np.linalg.inv(self.K).T) self.camera_rays /= np.linalg.norm(self.camera_rays, axis=2)[..., None] lam = 0.3 self.weighting = np.exp(lam * -np.arange(self.num_frames)) / np.sum( np.exp(lam * -np.arange(self.num_frames))) self.weighting = np.tile(self.weighting[:, None, None], [1, num_joints, 2]) # self.weighting = np.ones(self.num_frames) / self.num_frames def proj2d(self, wbpos, return_cam_3d=False): # wbpos in mujoco pred_joints3d = wbpos.squeeze()[self.MUJOCO_2_SMPL][ self.smpl2op_partial][None, ] pred_joints3d = pred_joints3d @ self.full_R.T + self.full_t pred_joints2d = pred_joints3d @ (self.K.T) z = pred_joints2d[:, :, 2:] pred_joints2d = pred_joints2d[:, :, :2] / z pred_joints2d = smpl_op_to_op(pred_joints2d) if return_cam_3d: return pred_joints2d, pred_joints3d else: return pred_joints2d def proj_2d_line_loss(self, input_vec): wbpos = self.fk_batch_grad(input_vec) _, pred_joints3d = self.proj2d(wbpos, return_cam_3d=True) dist = np.cross(pred_joints3d[0], pred_joints3d[0] - self.camera_rays)**2 return dist.mean() def proj_2d_loss(self, input_vec, ord=2, normalize = True): wbpos = self.fk_batch_grad(input_vec) pred_joints2d = self.proj2d(wbpos) curr_weighting = np.array(self.weighting) if normalize: pred_joints2d = normalize_screen_coordinates(pred_joints2d, self.img_w, self.img_h) gt_2d_joints = self.gt_2d_joints_norm else: gt_2d_joints = self.gt_2d_joints if ord == 1: loss = np.abs( gt_2d_joints[self.inliers] - 
pred_joints2d.squeeze()[self.inliers]).squeeze().mean() else: diff = (gt_2d_joints - pred_joints2d.squeeze())**2 curr_weighting[~self.inliers] = 0 loss = (diff * curr_weighting).sum(axis=0).mean() return loss def proj_2d_body_loss(self, input_vec, ord=2, normalize = False): # Has to use the current translation (to roughly put at the same position, and then zero out the translation) wbpos = self.fk_batch_grad(input_vec) pred_joints2d = self.proj2d(wbpos) gt2d_center = self.gt_2d_joints[..., 7:8, :].copy() pred_joints2d += (gt2d_center - pred_joints2d[..., 7:8, :]) curr_weighting = np.array(self.weighting) if normalize: pred_joints2d = normalize_screen_coordinates(pred_joints2d, self.img_w, self.img_h) gt_2d_joints = self.gt_2d_joints_norm else: gt_2d_joints = self.gt_2d_joints if ord == 1: loss = np.abs(gt_2d_joints[self.inliers] - pred_joints2d.squeeze()[self.inliers]).squeeze().mean() else: diff = (gt_2d_joints - pred_joints2d.squeeze())**2 curr_weighting[~self.inliers] = 0 loss = (diff * curr_weighting).sum(axis=0).mean() return loss def proj_2d_root_loss(self, root_pos_rot): input_vec = np.concatenate( [root_pos_rot.reshape([1, 1, 6]), np.zeros([1, 1, 69])], axis=2) wbpos = self.fk_batch_grad(input_vec) pred_joints2d = self.proj2d(wbpos) return np.abs(self.gt_2d_joints[7:8] - pred_joints2d.squeeze()[7:8]).squeeze().mean() def fk_batch(self, pose, trans, convert_to_mat=True, count_offset=True): pose, trans = pose.cpu().numpy(), trans.cpu().numpy() B, seq_len = pose.shape[:2] if convert_to_mat: pose_mat = rodrigues(pose.reshape(B * seq_len * 24, 1, 3)).reshape( B, seq_len, -1, 3, 3) else: pose_mat = pose if pose_mat.shape != 5: pose_mat = pose_mat.reshape(B, seq_len, -1, 3, 3) J = pose_mat.shape[2] - 1 # Exclude root if count_offset: trans = trans + self._offsets[:, 0:1] pose_mat_ordered = pose_mat[:, :, self.smpl_index] wbody_pos, wbody_mat = self.forward_kinematics_batch( pose_mat_ordered[:, :, 1:], pose_mat_ordered[:, :, 0:1], trans) return_dic = {} return_dic["wbpos"] = wbody_pos return_dic["wbmat"] = wbody_mat return return_dic def fk_batch_grad(self, input_vec, count_offset=True): trans, pose = input_vec[:, :, :3], input_vec[:, :, 3:] B, seq_len = pose.shape[:2] pose_mat = rodrigues(pose.reshape(-1, 1, 3)).reshape(B, seq_len, -1, 3, 3) # pose_mat = [ # rodrigues_vec_to_rotation_mat(a) for a in pose.reshape(-1, 3) # ] # pose_mat = np.stack(pose_mat).reshape(B, seq_len, -1, 3, 3) J = pose_mat.shape[2] - 1 # Exclude root if count_offset: trans = trans + self._offsets[:, 0:1] pose_mat_ordered = pose_mat[:, :, self.smpl_index] wbody_pos, wbody_mat = self.forward_kinematics_batch( pose_mat_ordered[:, :, 1:], pose_mat_ordered[:, :, 0:1], trans) return wbody_pos def get_ee_pos(self, body_xpos, root_q, transform):
ee_name = SMPL_EE_NAMES
2
2023-10-31 20:47:12+00:00
16k
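Both code fields in the record above lean on Rodrigues' rotation formula to turn axis-angle vectors into rotation matrices (see the docstring of the record's `rodrigues` helper). Below is a minimal single-vector NumPy sketch of that conversion, written purely for illustration; the function name and test values are invented here and are not part of the dataset record.

import numpy as np

def rodrigues_single(r):
    # Axis-angle vector (3,) -> 3x3 rotation matrix via R = I + sin(t)*K + (1 - cos(t))*K^2.
    theta = np.linalg.norm(r)
    if theta < np.finfo(float).eps:
        return np.eye(3)
    k = r / theta
    K = np.array([[0.0, -k[2], k[1]],
                  [k[2], 0.0, -k[0]],
                  [-k[1], k[0], 0.0]])
    return np.eye(3) + np.sin(theta) * K + (1.0 - np.cos(theta)) * (K @ K)

R = rodrigues_single(np.array([0.0, 0.0, np.pi / 2]))   # 90 degrees about z
assert np.allclose(R @ R.T, np.eye(3))                  # result is orthonormal
assert np.allclose(R @ np.array([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0])  # x-axis maps to y-axis

For a unit axis k, K @ K equals k k^T - I, so this form expands to the cos(theta)*I + (1 - cos(theta))*k k^T + sin(theta)*K expression that the record computes batch-wise.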
Improbable-AI/dexenv
dexenv/envs/dclaw_base.py
[ { "identifier": "VecTask", "path": "dexenv/envs/base/vec_task.py", "snippet": "class VecTask(Env):\n\n def __init__(self, config, sim_device, rl_device, graphics_device_id, headless):\n \"\"\"Initialise the `VecTask`.\n Args:\n config: config dictionary for the environment.\n sim_device: the device to simulate physics on. eg. 'cuda:0' or 'cpu'\n graphics_device_id: the device ID to render with.\n headless: Set to False to disable viewer rendering.\n \"\"\"\n super().__init__(config, sim_device, rl_device, graphics_device_id, headless)\n\n self.sim_params = self.__parse_sim_params(self.cfg[\"physics_engine\"], self.cfg[\"sim\"])\n if self.cfg[\"physics_engine\"] == \"physx\":\n self.physics_engine = gymapi.SIM_PHYSX\n elif self.cfg[\"physics_engine\"] == \"flex\":\n self.physics_engine = gymapi.SIM_FLEX\n else:\n msg = f\"Invalid physics engine backend: {self.cfg['physics_engine']}\"\n raise ValueError(msg)\n\n # optimization flags for pytorch JIT\n torch._C._jit_set_profiling_mode(False)\n torch._C._jit_set_profiling_executor(False)\n\n self.gym = gymapi.acquire_gym()\n\n self.first_randomization = True\n self.original_props = {}\n self.dr_randomizations = {}\n self.actor_params_generator = None\n self.extern_actor_params = {}\n self.last_step = -1\n self.last_rand_step = -1\n for env_id in range(self.num_envs):\n self.extern_actor_params[env_id] = None\n\n # create envs, sim and viewer\n self.sim_initialized = False\n self.create_sim()\n self.gym.prepare_sim(self.sim)\n self.sim_initialized = True\n\n self.set_viewer()\n self.allocate_buffers()\n\n self.obs_dict = {}\n\n def set_viewer(self):\n \"\"\"Create the viewer.\"\"\"\n\n # todo: read from config\n self.enable_viewer_sync = True\n self.viewer = None\n\n # if running with a viewer, set up keyboard shortcuts and camera\n if self.headless == False:\n # subscribe to keyboard shortcuts\n self.viewer = self.gym.create_viewer(\n self.sim, gymapi.CameraProperties())\n self.gym.subscribe_viewer_keyboard_event(\n self.viewer, gymapi.KEY_ESCAPE, \"QUIT\")\n self.gym.subscribe_viewer_keyboard_event(\n self.viewer, gymapi.KEY_V, \"toggle_viewer_sync\")\n\n # set the camera position based on up axis\n sim_params = self.gym.get_sim_params(self.sim)\n if sim_params.up_axis == gymapi.UP_AXIS_Z:\n cam_pos = gymapi.Vec3(20.0, 25.0, 3.0)\n cam_target = gymapi.Vec3(10.0, 15.0, 0.0)\n else:\n cam_pos = gymapi.Vec3(20.0, 3.0, 25.0)\n cam_target = gymapi.Vec3(10.0, 0.0, 15.0)\n\n self.gym.viewer_camera_look_at(\n self.viewer, None, cam_pos, cam_target)\n\n def allocate_buffers(self):\n \"\"\"Allocate the observation, states, etc. 
buffers.\n These are what is used to set observations and states in the environment classes which\n inherit from this one, and are read in `step` and other related functions.\n \"\"\"\n\n # allocate buffers\n self.allocate_ob_buffers()\n self.rew_buf = torch.zeros(\n self.num_envs, device=self.device, dtype=torch.float)\n self.done_buf = torch.zeros(\n self.num_envs, device=self.device, dtype=torch.long)\n self.reset_buf = torch.ones(\n self.num_envs, device=self.device, dtype=torch.long)\n self.timeout_buf = torch.zeros(\n self.num_envs, device=self.device, dtype=torch.long)\n self.progress_buf = torch.zeros(\n self.num_envs, device=self.device, dtype=torch.long)\n self.randomize_buf = torch.zeros(\n self.num_envs, device=self.device, dtype=torch.long)\n self.extras = {}\n\n def allocate_ob_buffers(self):\n self.obs_buf = torch.zeros(\n (self.num_envs, self.num_obs), device=self.device, dtype=torch.float)\n self.states_buf = torch.zeros(\n (self.num_envs, self.num_states), device=self.device, dtype=torch.float)\n\n #\n def set_sim_params_up_axis(self, sim_params: gymapi.SimParams, axis: str) -> int:\n \"\"\"Set gravity based on up axis and return axis index.\n Args:\n sim_params: sim params to modify the axis for.\n axis: axis to set sim params for.\n Returns:\n axis index for up axis.\n \"\"\"\n if axis == 'z':\n sim_params.up_axis = gymapi.UP_AXIS_Z\n sim_params.gravity.x = 0\n sim_params.gravity.y = 0\n sim_params.gravity.z = -9.81\n return 2\n return 1\n\n def create_sim(self, compute_device: int, graphics_device: int, physics_engine, sim_params: gymapi.SimParams):\n \"\"\"Create an Isaac Gym sim object.\n Args:\n compute_device: ID of compute device to use.\n graphics_device: ID of graphics device to use.\n physics_engine: physics engine to use (`gymapi.SIM_PHYSX` or `gymapi.SIM_FLEX`)\n sim_params: sim params to use.\n Returns:\n the Isaac Gym sim object.\n \"\"\"\n sim = self.gym.create_sim(compute_device, graphics_device, physics_engine, sim_params)\n if sim is None:\n print(\"*** Failed to create sim\")\n quit()\n\n return sim\n\n def get_state(self):\n \"\"\"Returns the state buffer of the environment (the priviledged observations for asymmetric training).\"\"\"\n return torch.clamp(self.states_buf, -self.clip_obs, self.clip_obs).to(self.rl_device)\n\n @abc.abstractmethod\n def pre_physics_step(self, actions: torch.Tensor):\n \"\"\"Apply the actions to the environment (eg by setting torques, position targets).\n Args:\n actions: the actions to apply\n \"\"\"\n\n @abc.abstractmethod\n def post_physics_step(self):\n \"\"\"Compute reward and observations, reset any environments that require it.\"\"\"\n\n def step(self, actions: torch.Tensor) -> Tuple[Dict[str, torch.Tensor], torch.Tensor, torch.Tensor, Dict[str, Any]]:\n \"\"\"Step the physics of the environment.\n Args:\n actions: actions to apply\n Returns:\n Observations, rewards, resets, info\n Observations are dict of observations (currently only one member called 'obs')\n \"\"\"\n self.raw_actions_from_policy = actions.clone()\n # randomize actions\n if self.dr_randomizations.get('actions', None):\n actions = self.dr_randomizations['actions']['noise_lambda'](actions)\n action_tensor = torch.clamp(actions, -self.clip_actions, self.clip_actions)\n # apply actions\n self.pre_physics_step(action_tensor)\n\n # # step physics and render each frame\n for i in range(self.control_freq_inv):\n self.render()\n self.gym.simulate(self.sim)\n # to fix!\n if self.device == 'cpu':\n self.gym.fetch_results(self.sim, True)\n\n # fill time out 
buffer\n self.timeout_buf = torch.where(self.progress_buf >= self.max_episode_length - 1, torch.ones_like(self.timeout_buf),\n torch.zeros_like(self.timeout_buf))\n\n # compute observations, rewards, resets, ...\n self.post_physics_step()\n\n self.extras[\"time_outs\"] = self.timeout_buf.to(self.rl_device)\n return self.update_obs(), self.rew_buf.to(self.rl_device), self.done_buf.to(self.rl_device), self.extras\n\n def update_obs(self):\n # randomize observations\n if self.dr_randomizations.get('observations', None):\n self.obs_buf = self.dr_randomizations['observations']['noise_lambda'](self.obs_buf)\n self.obs_dict[\"ob\"] = torch.clamp(self.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device)\n\n # asymmetric actor-critic\n if self.num_states > 0:\n self.obs_dict[\"state\"] = self.get_state()\n return self.obs_dict\n\n def zero_actions(self) -> torch.Tensor:\n \"\"\"Returns a buffer with zero actions.\n Returns:\n A buffer of zero torch actions\n \"\"\"\n actions = torch.zeros([self.num_envs, self.num_actions], dtype=torch.float32, device=self.device)\n\n return actions\n\n ## original code from Nvidia\n def reset(self) -> torch.Tensor:\n \"\"\"Reset the environment.\n Returns:\n Observation dictionary\n \"\"\"\n zero_actions = self.zero_actions()\n\n # step the simulator\n self.step(zero_actions)\n\n return self.update_obs()\n\n def render(self):\n \"\"\"Draw the frame to the viewer, and check for keyboard events.\"\"\"\n if self.viewer:\n # check for window closed\n if self.gym.query_viewer_has_closed(self.viewer):\n sys.exit()\n\n # check for keyboard events\n for evt in self.gym.query_viewer_action_events(self.viewer):\n if evt.action == \"QUIT\" and evt.value > 0:\n sys.exit()\n elif evt.action == \"toggle_viewer_sync\" and evt.value > 0:\n self.enable_viewer_sync = not self.enable_viewer_sync\n\n # fetch results\n if self.device != 'cpu':\n self.gym.fetch_results(self.sim, True)\n\n # step graphics\n if self.enable_viewer_sync:\n self.gym.step_graphics(self.sim)\n self.gym.draw_viewer(self.viewer, self.sim, True)\n\n # Wait for dt to elapse in real time.\n # This synchronizes the physics simulation with the rendering rate.\n self.gym.sync_frame_time(self.sim)\n\n else:\n self.gym.poll_viewer_events(self.viewer)\n\n def __parse_sim_params(self, physics_engine: str, config_sim: Dict[str, Any]) -> gymapi.SimParams:\n \"\"\"Parse the config dictionary for physics stepping settings.\n Args:\n physics_engine: which physics engine to use. 
\"physx\" or \"flex\"\n config_sim: dict of sim configuration parameters\n Returns\n IsaacGym SimParams object with updated settings.\n \"\"\"\n sim_params = gymapi.SimParams()\n\n # check correct up-axis\n if config_sim[\"up_axis\"] not in [\"z\", \"y\"]:\n msg = f\"Invalid physics up-axis: {config_sim['up_axis']}\"\n print(msg)\n raise ValueError(msg)\n\n # assign general sim parameters\n sim_params.dt = config_sim[\"dt\"]\n sim_params.num_client_threads = config_sim.get(\"num_client_threads\", 0)\n sim_params.use_gpu_pipeline = config_sim[\"use_gpu_pipeline\"]\n sim_params.substeps = config_sim.get(\"substeps\", 2)\n\n # assign up-axis\n if config_sim[\"up_axis\"] == \"z\":\n sim_params.up_axis = gymapi.UP_AXIS_Z\n else:\n sim_params.up_axis = gymapi.UP_AXIS_Y\n\n # assign gravity\n sim_params.gravity = gymapi.Vec3(*config_sim[\"gravity\"])\n\n # configure physics parameters\n if physics_engine == \"physx\":\n # set the parameters\n if \"physx\" in config_sim:\n for opt in config_sim[\"physx\"].keys():\n if opt == \"contact_collection\":\n setattr(sim_params.physx, opt, gymapi.ContactCollection(config_sim[\"physx\"][opt]))\n else:\n setattr(sim_params.physx, opt, config_sim[\"physx\"][opt])\n else:\n # set the parameters\n if \"flex\" in config_sim:\n for opt in config_sim[\"flex\"].keys():\n setattr(sim_params.flex, opt, config_sim[\"flex\"][opt])\n\n # return the configured params\n return sim_params\n\n \"\"\"\n Domain Randomization methods\n \"\"\"\n\n def get_actor_params_info(self, dr_params: Dict[str, Any], env):\n \"\"\"Generate a flat array of actor params, their names and ranges.\n Returns:\n The array\n \"\"\"\n\n if \"actor_params\" not in dr_params:\n return None\n params = []\n names = []\n lows = []\n highs = []\n param_getters_map = get_property_getter_map(self.gym)\n for actor, actor_properties in dr_params[\"actor_params\"].items():\n handle = self.gym.find_actor_handle(env, actor)\n for prop_name, prop_attrs in actor_properties.items():\n if prop_name in ['color', 'scale']:\n continue # this is set randomly\n props = param_getters_map[prop_name](env, handle)\n if not isinstance(props, list):\n props = [props]\n for prop_idx, prop in enumerate(props):\n for attr, attr_randomization_params in prop_attrs.items():\n name = prop_name + '_' + str(prop_idx) + '_' + attr\n lo_hi = attr_randomization_params['range']\n distr = attr_randomization_params['distribution']\n if 'uniform' not in distr:\n lo_hi = (-1.0 * float('Inf'), float('Inf'))\n if isinstance(prop, np.ndarray):\n for attr_idx in range(prop[attr].shape[0]):\n params.append(prop[attr][attr_idx])\n names.append(name + '_' + str(attr_idx))\n lows.append(lo_hi[0])\n highs.append(lo_hi[1])\n else:\n params.append(getattr(prop, attr))\n names.append(name)\n lows.append(lo_hi[0])\n highs.append(lo_hi[1])\n return params, names, lows, highs\n\n def apply_randomizations(self, dr_params):\n rand_freq = dr_params.get(\"frequency\", 1)\n self.last_step = self.gym.get_frame_count(self.sim)\n if self.first_randomization:\n do_nonenv_randomize = True\n env_ids = list(range(self.num_envs))\n else:\n do_nonenv_randomize = (self.last_step - self.last_rand_step) >= rand_freq\n rand_envs = torch.where(self.randomize_buf >= rand_freq, torch.ones_like(self.randomize_buf), torch.zeros_like(self.randomize_buf))\n rand_envs = torch.logical_and(rand_envs, self.reset_buf)\n env_ids = torch.nonzero(rand_envs, as_tuple=False).squeeze(-1).tolist()\n self.randomize_buf[rand_envs] = 0\n\n if do_nonenv_randomize:\n self.last_rand_step = 
self.last_step\n\n param_setters_map = get_property_setter_map(self.gym)\n param_setter_defaults_map = get_default_setter_args(self.gym)\n param_getters_map = get_property_getter_map(self.gym)\n\n # On first iteration, check the number of buckets\n if self.first_randomization:\n check_buckets(self.gym, self.envs, dr_params)\n\n for nonphysical_param in [\"observations\", \"actions\"]:\n if nonphysical_param in dr_params and do_nonenv_randomize:\n dist = dr_params[nonphysical_param][\"distribution\"]\n op_type = dr_params[nonphysical_param][\"operation\"]\n sched_type = dr_params[nonphysical_param][\"schedule\"] if \"schedule\" in dr_params[nonphysical_param] else None\n sched_step = dr_params[nonphysical_param][\"schedule_steps\"] if \"schedule\" in dr_params[nonphysical_param] else None\n op = operator.add if op_type == 'additive' else operator.mul\n\n if sched_type == 'linear':\n sched_scaling = 1.0 / sched_step * \\\n min(self.last_step, sched_step)\n elif sched_type == 'constant':\n sched_scaling = 0 if self.last_step < sched_step else 1\n else:\n sched_scaling = 1\n\n if dist == 'gaussian':\n mu, var = dr_params[nonphysical_param][\"range\"]\n mu_corr, var_corr = dr_params[nonphysical_param].get(\"range_correlated\", [0., 0.])\n\n if op_type == 'additive':\n mu *= sched_scaling\n var *= sched_scaling\n mu_corr *= sched_scaling\n var_corr *= sched_scaling\n elif op_type == 'scaling':\n var = var * sched_scaling # scale up var over time\n mu = mu * sched_scaling + 1.0 * \\\n (1.0 - sched_scaling) # linearly interpolate\n\n var_corr = var_corr * sched_scaling # scale up var over time\n mu_corr = mu_corr * sched_scaling + 1.0 * \\\n (1.0 - sched_scaling) # linearly interpolate\n\n def noise_lambda(tensor, param_name=nonphysical_param):\n params = self.dr_randomizations[param_name]\n corr = params.get('corr', None)\n if corr is None:\n corr = torch.randn_like(tensor)\n params['corr'] = corr\n corr = corr * params['var_corr'] + params['mu_corr']\n return op(\n tensor, corr + torch.randn_like(tensor) * params['var'] + params['mu'])\n\n self.dr_randomizations[nonphysical_param] = {'mu': mu, 'var': var, 'mu_corr': mu_corr, 'var_corr': var_corr,\n 'noise_lambda': noise_lambda}\n\n elif dist == 'uniform':\n lo, hi = dr_params[nonphysical_param][\"range\"]\n lo_corr, hi_corr = dr_params[nonphysical_param].get(\"range_correlated\", [0., 0.])\n\n if op_type == 'additive':\n lo *= sched_scaling\n hi *= sched_scaling\n lo_corr *= sched_scaling\n hi_corr *= sched_scaling\n elif op_type == 'scaling':\n lo = lo * sched_scaling + 1.0 * (1.0 - sched_scaling)\n hi = hi * sched_scaling + 1.0 * (1.0 - sched_scaling)\n lo_corr = lo_corr * sched_scaling + 1.0 * (1.0 - sched_scaling)\n hi_corr = hi_corr * sched_scaling + 1.0 * (1.0 - sched_scaling)\n\n def noise_lambda(tensor, param_name=nonphysical_param):\n params = self.dr_randomizations[param_name]\n corr = params.get('corr', None)\n if corr is None:\n corr = torch.randn_like(tensor)\n params['corr'] = corr\n corr = corr * (params['hi_corr'] - params['lo_corr']) + params['lo_corr']\n return op(tensor, corr + torch.rand_like(tensor) * (params['hi'] - params['lo']) + params['lo'])\n\n self.dr_randomizations[nonphysical_param] = {'lo': lo, 'hi': hi, 'lo_corr': lo_corr, 'hi_corr': hi_corr,\n 'noise_lambda': noise_lambda}\n\n if \"sim_params\" in dr_params and do_nonenv_randomize:\n prop_attrs = dr_params[\"sim_params\"]\n prop = self.gym.get_sim_params(self.sim)\n\n if self.first_randomization:\n self.original_props[\"sim_params\"] = {\n attr: getattr(prop, 
attr) for attr in dir(prop)}\n\n for attr, attr_randomization_params in prop_attrs.items():\n apply_random_samples(\n prop, self.original_props[\"sim_params\"], attr, attr_randomization_params, self.last_step)\n\n self.gym.set_sim_params(self.sim, prop)\n extern_offsets = {}\n if self.actor_params_generator is not None:\n for env_id in env_ids:\n self.extern_actor_params[env_id] = \\\n self.actor_params_generator.sample()\n extern_offsets[env_id] = 0\n\n for actor, actor_properties in dr_params[\"actor_params\"].items():\n for env_id in env_ids:\n self.original_props.setdefault(env_id, dict())\n env = self.envs[env_id]\n handle = self.gym.find_actor_handle(env, actor)\n self.original_props[env_id].setdefault(handle, dict())\n extern_sample = self.extern_actor_params[env_id]\n\n for prop_name, prop_attrs in actor_properties.items():\n if prop_name == 'color':\n num_bodies = self.gym.get_actor_rigid_body_count(\n env, handle)\n for n in range(num_bodies):\n self.gym.set_rigid_body_color(env, handle, n, gymapi.MESH_VISUAL,\n gymapi.Vec3(random.uniform(0, 1),\n random.uniform(0, 1),\n random.uniform(0, 1)))\n continue\n if prop_name == 'scale':\n setup_only = prop_attrs.get('setup_only', False)\n if (setup_only and not self.sim_initialized) or not setup_only:\n attr_randomization_params = prop_attrs\n sample = generate_random_samples(attr_randomization_params, 1,\n self.last_step, None)\n og_scale = 1\n if attr_randomization_params['operation'] == 'scaling':\n new_scale = og_scale * sample\n elif attr_randomization_params['operation'] == 'additive':\n new_scale = og_scale + sample\n self.gym.set_actor_scale(env, handle, new_scale)\n continue\n\n prop = param_getters_map[prop_name](env, handle)\n set_random_properties = True\n if isinstance(prop, list):\n if self.first_randomization:\n self.original_props[env_id][handle][prop_name] = [\n {attr: getattr(p, attr) for attr in dir(p)} for p in prop]\n for attr, attr_randomization_params in prop_attrs.items():\n same_for_all = attr_randomization_params.get('same_for_all', False)\n setup_only = attr_randomization_params.get('setup_only', False)\n attr_sample = None\n assert len(prop) == len(self.original_props[env_id][handle][prop_name])\n for p, og_p in zip(prop, self.original_props[env_id][handle][prop_name]):\n if (setup_only and not self.sim_initialized) or not setup_only:\n smpl = None\n if self.actor_params_generator is not None:\n smpl, extern_offsets[env_id] = get_attr_val_from_sample(\n extern_sample, extern_offsets[env_id], p, attr)\n if same_for_all and attr_sample is not None:\n apply_prop_samples(p, og_p, attr, attr_randomization_params, attr_sample)\n else:\n attr_sample = apply_random_samples(\n p, og_p, attr, attr_randomization_params,\n self.last_step, smpl)\n else:\n set_random_properties = False\n else:\n if self.first_randomization:\n self.original_props[env_id][handle][prop_name] = deepcopy(prop)\n for attr, attr_randomization_params in prop_attrs.items():\n setup_only = attr_randomization_params.get('setup_only', False)\n if (setup_only and not self.sim_initialized) or not setup_only:\n smpl = None\n if self.actor_params_generator is not None:\n smpl, extern_offsets[env_id] = get_attr_val_from_sample(\n extern_sample, extern_offsets[env_id], prop, attr)\n apply_random_samples(\n prop, self.original_props[env_id][handle][prop_name], attr,\n attr_randomization_params, self.last_step, smpl)\n else:\n set_random_properties = False\n if set_random_properties:\n setter = param_setters_map[prop_name]\n default_args = 
param_setter_defaults_map[prop_name]\n setter(env, handle, prop, *default_args)\n\n if self.actor_params_generator is not None:\n for env_id in env_ids: # check that we used all dims in sample\n if extern_offsets[env_id] > 0:\n extern_sample = self.extern_actor_params[env_id]\n if extern_offsets[env_id] != extern_sample.shape[0]:\n print('env_id', env_id,\n 'extern_offset', extern_offsets[env_id],\n 'vs extern_sample.shape', extern_sample.shape)\n raise Exception(\"Invalid extern_sample size\")\n self.first_randomization = False\n return env_ids" }, { "identifier": "compute_dclaw_reward", "path": "dexenv/envs/rewards.py", "snippet": "@torch.no_grad()\ndef compute_dclaw_reward(reset_buf, reset_goal_buf, progress_buf,\n successes, max_episode_length: float,\n object_pos, object_rot, target_pos, target_rot,\n reward_cfg, actions,\n fingertip_pos=None, fingertip_vel=None,\n object_linvel=None, object_angvel=None, dof_vel=None,\n dof_torque=None, table_cf=None\n ):\n rot_reward_scale = reward_cfg.rotRewardScale\n rot_eps = reward_cfg.rotEps\n reach_goal_bonus = reward_cfg.reachGoalBonus\n fall_dist = reward_cfg.fallDistance\n fall_penalty = reward_cfg.fallPenalty\n success_tolerance = reward_cfg.successTolerance\n ftip_reward_scale = reward_cfg.ftipRewardScale\n penalize_tb_contact = reward_cfg.pen_tb_contact\n kwargs = dict(\n reset_buf=reset_buf,\n reset_goal_buf=reset_goal_buf,\n progress_buf=progress_buf,\n successes=successes,\n max_episode_length=max_episode_length,\n object_pos=object_pos,\n object_rot=object_rot,\n target_pos=target_pos,\n target_rot=target_rot,\n actions=actions,\n fingertip_pos=fingertip_pos,\n object_linvel=object_linvel,\n object_angvel=object_angvel,\n dof_vel=dof_vel,\n dof_torque=dof_torque,\n rot_reward_scale=rot_reward_scale,\n rot_eps=rot_eps,\n reach_goal_bonus=reach_goal_bonus,\n fall_dist=fall_dist,\n fall_penalty=fall_penalty,\n success_tolerance=success_tolerance,\n ftip_reward_scale=ftip_reward_scale,\n energy_scale=reward_cfg.energy_scale,\n dof_vel_thresh=reward_cfg.dof_vel_thresh,\n obj_lin_vel_thresh=reward_cfg.obj_lin_vel_thresh,\n obj_ang_vel_thresh=reward_cfg.obj_ang_vel_thresh,\n action_norm_thresh=reward_cfg.action_norm_thresh,\n penalize_tb_contact=penalize_tb_contact,\n table_cf=table_cf if table_cf is not None else torch.ones(1),\n tb_cf_scale=reward_cfg.tb_cf_scale,\n clip_energy_reward=reward_cfg.clip_energy_reward,\n energy_upper_bound=reward_cfg.energy_upper_bound,\n )\n out = compute_reward(**kwargs)\n return out" }, { "identifier": "get_module_path", "path": "dexenv/utils/common.py", "snippet": "def get_module_path(module):\n modu = importlib.util.find_spec(module)\n return Path(list(modu.submodule_search_locations)[0])" }, { "identifier": "pathlib_file", "path": "dexenv/utils/common.py", "snippet": "def pathlib_file(file_name):\n if isinstance(file_name, str):\n file_name = Path(file_name)\n elif not isinstance(file_name, Path):\n raise TypeError(f'Please check the type of the filename:{file_name}')\n return file_name" }, { "identifier": "dclaw_body_color_mapping", "path": "dexenv/utils/hand_color.py", "snippet": "FINGERTIP_COLORS = np.array([\n [111, 29, 27],\n [187, 148, 87],\n [67, 40, 24],\n [153, 88, 42],\n [255, 230, 167]\n]) / 255.0\nFINGERTIP_COLORS = FINGERTIP_COLORS.tolist()" }, { "identifier": "get_camera_params", "path": "dexenv/utils/isaac_utils.py", "snippet": "def get_camera_params(width=640, height=480, hov=75, cuda=True):\n camera_props = gymapi.CameraProperties()\n camera_props.horizontal_fov = hov\n 
camera_props.width = width\n camera_props.height = height\n camera_props.enable_tensors = cuda\n return camera_props" }, { "identifier": "random_quaternions", "path": "dexenv/utils/torch_utils.py", "snippet": "@torch.no_grad()\ndef random_quaternions(num, dtype=None, device=None, order='xyzw'):\n \"\"\"\n return quaternions in [w, x, y, z] or [x, y, z, w]\n \"\"\"\n if PYTORCH3D_AVAILABLE:\n quats = py3d_rot_cvt.random_quaternions(num, dtype=dtype, device=device)\n else:\n \"\"\"\n http://planning.cs.uiuc.edu/node198.html\n \"\"\"\n ran = torch.rand(num, 3, dtype=dtype, device=device)\n r1, r2, r3 = ran[:, 0], ran[:, 1], ran[:, 2]\n pi2 = 2 * np.pi\n r1_1 = torch.sqrt(1.0 - r1)\n r1_2 = torch.sqrt(r1)\n t1 = pi2 * r2\n t2 = pi2 * r3\n\n quats = torch.zeros(num, 4, dtype=dtype, device=device)\n quats[:, 0] = r1_1 * (torch.sin(t1))\n quats[:, 1] = r1_1 * (torch.cos(t1))\n quats[:, 2] = r1_2 * (torch.sin(t2))\n quats[:, 3] = r1_2 * (torch.cos(t2))\n\n assert order in ['xyzw', 'wxyz']\n if order == 'xyzw':\n quats = quat_wxyz_to_xyzw(quats)\n return quats" }, { "identifier": "torch_long", "path": "dexenv/utils/torch_utils.py", "snippet": "def torch_long(array, device='cpu'):\n if isinstance(array, torch.Tensor):\n return array.long().to(device)\n elif isinstance(array, np.ndarray):\n return torch.from_numpy(array).long().to(device)\n elif isinstance(array, list):\n return torch.LongTensor(array).to(device)\n elif isinstance(array, dict):\n new_dict = dict()\n for k, v in array.items():\n new_dict[k] = torch_long(v, device)\n return new_dict" } ]
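One of the context snippets above, `random_quaternions`, documents a fallback that draws uniformly distributed unit quaternions (the subgroup-algorithm construction it links to). A small NumPy sketch of that sampling scheme follows; it is illustrative only, the function name is invented, and it is not part of the record's context field.

import numpy as np

def sample_uniform_quaternions(num, seed=0):
    # Three uniform variates per quaternion; components mirror the snippet's
    # internal [w, x, y, z] build order and land uniformly on the unit 3-sphere.
    rng = np.random.default_rng(seed)
    r1, r2, r3 = rng.random(num), rng.random(num), rng.random(num)
    t1, t2 = 2.0 * np.pi * r2, 2.0 * np.pi * r3
    s1, s2 = np.sqrt(1.0 - r1), np.sqrt(r1)
    return np.stack([s1 * np.sin(t1), s1 * np.cos(t1),
                     s2 * np.sin(t2), s2 * np.cos(t2)], axis=1)

quats = sample_uniform_quaternions(5)
assert quats.shape == (5, 4)
assert np.allclose(np.linalg.norm(quats, axis=1), 1.0)  # every sample is unit-norm

Unit norm follows directly: s1^2 + s2^2 = (1 - r1) + r1 = 1, and each sine/cosine pair contributes sin^2 + cos^2 = 1.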
import time
import torch
import dexenv
from isaacgym import gymapi
from isaacgym import gymtorch
from isaacgym.gymutil import get_property_getter_map
from isaacgym.gymutil import get_property_setter_map
from isaacgymenvs.utils.torch_jit_utils import *
from loguru import logger
from dexenv.envs.base.vec_task import VecTask
from dexenv.envs.rewards import compute_dclaw_reward
from dexenv.utils.common import get_module_path
from dexenv.utils.common import pathlib_file
from dexenv.utils.hand_color import dclaw_body_color_mapping
from dexenv.utils.isaac_utils import get_camera_params
from dexenv.utils.torch_utils import random_quaternions
from dexenv.utils.torch_utils import torch_long
11,165
if self.cfg["env"]["effort_limit"] is not None: effort_limit = self.cfg["env"]["effort_limit"] if not self.cfg.env.soft_control else self.cfg["env"]["soft_effort_limit"] print(f'Setting DOF effort limit to:{effort_limit}') set_dof_prop(dclaw_dof_props, 'effort', effort_limit) if self.cfg["env"]["stiffness"] is not None: stiffness = self.cfg["env"]["stiffness"] if not self.cfg.env.soft_control else self.cfg["env"]["soft_stiffness"] print(f'Setting stiffness to:{stiffness}') set_dof_prop(dclaw_dof_props, 'stiffness', stiffness) if self.cfg["env"]["damping"] is not None: damping = self.cfg["env"]["damping"] if not self.cfg.env.soft_control else self.cfg["env"]["soft_damping"] print(f'Setting damping to:{damping}') set_dof_prop(dclaw_dof_props, 'damping', damping) self.dclaw_dof_lower_limits = [] self.dclaw_dof_upper_limits = [] self.dclaw_default_dof_states = np.zeros(self.num_dclaw_dofs, dtype=gymapi.DofState.dtype) self.dclaw_default_dof_pos = self.dclaw_default_dof_states['pos'] self.dclaw_default_dof_vel = self.dclaw_default_dof_states['vel'] for i in range(self.num_dclaw_dofs): self.dclaw_dof_lower_limits.append(dclaw_dof_props['lower'][i]) self.dclaw_dof_upper_limits.append(dclaw_dof_props['upper'][i]) if i % 3 == 1: self.dclaw_default_dof_pos[i] = 0.8 elif i % 3 == 2: self.dclaw_default_dof_pos[i] = -1.1 else: self.dclaw_default_dof_pos[i] = 0. self.dclaw_default_dof_vel[i] = 0.0 self.dof_joint_indices = to_torch(self.dof_joint_indices, dtype=torch.long, device=self.device) self.dclaw_dof_lower_limits = to_torch(self.dclaw_dof_lower_limits, device=self.device) self.dclaw_dof_upper_limits = to_torch(self.dclaw_dof_upper_limits, device=self.device) self.dclaw_default_dof_pos = to_torch(self.dclaw_default_dof_pos, device=self.device) self.dclaw_default_dof_vel = to_torch(self.dclaw_default_dof_vel, device=self.device) self.fingertip_handles = [self.gym.find_asset_rigid_body_index(dclaw_asset, name) for name in self.fingertips] dclaw_asset_props = self.gym.get_asset_rigid_shape_properties(dclaw_asset) for p in dclaw_asset_props: p.friction = self.cfg.env.hand.friction p.torsion_friction = self.cfg.env.hand.torsion_friction p.rolling_friction = self.cfg.env.hand.rolling_friction p.restitution = self.cfg.env.hand.restitution self.gym.set_asset_rigid_shape_properties(dclaw_asset, dclaw_asset_props) return dclaw_asset, dclaw_dof_props def get_object_start_pose(self, dclaw_start_pose): object_start_pose = gymapi.Transform() object_start_pose.p = gymapi.Vec3() if self.cfg.env.obj_init_delta_pos is not None: delta_pos = self.cfg.env.obj_init_delta_pos object_start_pose.p.x = dclaw_start_pose.p.x + delta_pos[0] object_start_pose.p.y = dclaw_start_pose.p.y + delta_pos[1] object_start_pose.p.z = dclaw_start_pose.p.z + delta_pos[2] else: object_start_pose.p.x = dclaw_start_pose.p.x pose_dy, pose_dz = 0., -0.13 object_start_pose.p.y = dclaw_start_pose.p.y + pose_dy object_start_pose.p.z = dclaw_start_pose.p.z + pose_dz return object_start_pose def get_goal_object_start_pose(self, object_start_pose): self.goal_displacement = gymapi.Vec3(0., 0, 0.25) self.goal_displacement_tensor = to_torch( [self.goal_displacement.x, self.goal_displacement.y, self.goal_displacement.z], device=self.device) goal_start_pose = gymapi.Transform() goal_start_pose.p = object_start_pose.p + self.goal_displacement return goal_start_pose def set_dof_props(self, props_dict): param_setters_map = get_property_setter_map(self.gym) param_getters_map = get_property_getter_map(self.gym) prop_name = 'dof_properties' setter = 
param_setters_map[prop_name] for env_id in range(len(self.envs)): env = self.envs[env_id] handle = self.gym.find_actor_handle(env, 'hand') prop = param_getters_map[prop_name](env, handle) for dof_prop_name, dof_prop_values in props_dict.items(): if env_id == 0: assert len(dof_prop_values) == len(self.envs) prop_val = dof_prop_values[env_id] prop[dof_prop_name].fill(prop_val) success = setter(env, handle, prop) if not success: logger.warning(f'Setting dof properties is not successful!') def update_obj_mass(self, env_ids=None): object_rb_masses = [] env_pool = env_ids if env_ids is not None else list(range(self.num_envs)) if len(env_pool) < 1: return for env_id, object_handle in zip(env_pool, self.object_handles): env_ptr = self.envs[env_id] object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle) object_rb_masses.append([prop.mass for prop in object_rb_props]) if self.object_rb_masses is None: self.object_rb_masses = to_torch(object_rb_masses, dtype=torch.float, device=self.device) else: self.object_rb_masses[env_pool] = to_torch(object_rb_masses, dtype=torch.float, device=self.device) def reset(self) -> torch.Tensor: """Reset the environment. Returns: Observation dictionary """ zero_actions = self.zero_actions() self.reset_buf.fill_(1) self.reset_goal_buf.fill_(1) if self.cfg.env.action_ema is not None: self.action_ema_val = zero_actions.clone() # step the simulator self.step(zero_actions) return self.update_obs() def compute_reward(self, actions):
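The cropped code above configures DOF properties through a local `set_dof_prop` helper whose broadcast rule is easy to misread: a scalar fills every DOF, a length-3 value is tiled once per three-DOF finger, and any other sequence is used as-is. A standalone sketch of that rule, using a plain dict and invented numbers in place of the Isaac Gym property array, purely for illustration:

import numpy as np

def set_dof_prop(props, prop_name, val):
    # Scalar -> fill all DOFs; 3-vector -> repeat per finger; otherwise use the value verbatim.
    if np.isscalar(val):
        props[prop_name].fill(val)
    elif len(val) == 3:
        props[prop_name] = np.array(list(val) * (len(props[prop_name]) // 3))
    else:
        props[prop_name] = np.array(val)

props = {'stiffness': np.zeros(12)}            # e.g. a 4-finger hand with 3 DOFs per finger
set_dof_prop(props, 'stiffness', (3.0, 2.0, 2.0))
assert props['stiffness'].tolist() == [3.0, 2.0, 2.0] * 4

set_dof_prop(props, 'stiffness', 5.0)
assert np.all(props['stiffness'] == 5.0)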
class DClawBase(VecTask): def __init__(self, cfg, sim_device, rl_device, graphics_device_id): self.cfg = cfg headless = self.cfg.headless self.randomize = self.cfg["task"]["randomize"] if self.randomize: logger.warning(f'Domain randomization is enabled!') self.randomization_params = self.cfg["task"]["randomization_params"] self.aggregate_mode = self.cfg["env"]["aggregateMode"] self.dist_reward_scale = self.cfg["env"]["rew"]["distRewardScale"] self.rot_reward_scale = self.cfg["env"]["rew"]["rotRewardScale"] self.success_tolerance = self.cfg["env"]["rew"]["successTolerance"] self.reach_goal_bonus = self.cfg["env"]["rew"]["reachGoalBonus"] self.fall_dist = self.cfg["env"]["rew"]["fallDistance"] self.fall_penalty = self.cfg["env"]["rew"]["fallPenalty"] self.rot_eps = self.cfg["env"]["rew"]["rotEps"] self.vel_obs_scale = 0.2 # scale factor of velocity based observations self.force_torque_obs_scale = 10.0 # scale factor of velocity based observations self.reset_position_noise = self.cfg["env"]["resetPositionNoise"] self.reset_rotation_noise = self.cfg["env"]["resetRotationNoise"] self.reset_dof_pos_noise = self.cfg["env"]["resetDofPosRandomInterval"] self.reset_dof_vel_noise = self.cfg["env"]["resetDofVelRandomInterval"] self.force_scale = self.cfg["env"].get("forceScale", 0.0) self.force_prob_range = self.cfg["env"].get("forceProbRange", [0.001, 0.1]) self.force_decay = self.cfg["env"].get("forceDecay", 0.99) self.force_decay_interval = self.cfg["env"].get("forceDecayInterval", 0.08) self.dclaw_dof_speed_scale = self.cfg["env"]["dofSpeedScale"] # self.act_moving_average = self.cfg["env"]["actionsMovingAverage"] self.debug_viz = self.cfg["env"]["enableDebugVis"] self.max_episode_length = self.cfg["env"]["episodeLength"] self.reset_time = self.cfg["env"].get("resetTime", -1.0) self.print_success_stat = self.cfg["env"]["printNumSuccesses"] self.max_consecutive_successes = self.cfg["env"]["maxConsecutiveSuccesses"] self.av_factor = self.cfg["env"].get("averFactor", 0.1) self.object_type = self.cfg["env"]["objectType"] self.asset_files_dict = { "block": "urdf/objects/cube_multicolor.urdf", "egg": "mjcf/open_ai_assets/hand/egg.xml", "airplane": "single_objects/airplane/model.urdf", 'power_drill': 'single_objects/power_drill/model.urdf', 'mug': 'single_objects/mug/model.urdf', 'elephant': 'asymm/train/elephant/var_000/model.urdf', 'train': 'asymm/train/train/var_000/model.urdf', 'stanford_bunny': 'asymm/train/stanford_bunny/var_004/model.urdf' } self.objs_in_isaacgym = ['block', 'egg'] if "asset" in self.cfg["env"]: self.asset_files_dict["block"] = self.cfg["env"]["asset"].get("assetFileNameBlock", self.asset_files_dict["block"]) self.asset_files_dict["egg"] = self.cfg["env"]["asset"].get("assetFileNameEgg", self.asset_files_dict["egg"]) self.obs_type = self.cfg["env"]["observationType"] if not (self.obs_type in ["full_no_vel", "full", "full_state"]): raise Exception( "Unknown type of observations!\nobservationType should be one of: [openai, full_no_vel, full, full_state]") print("Obs type:", self.obs_type) ## TODO: change value here self.num_obs_dict = { "full_no_vel": 42, "full": 87, "full_state": 114 } self.up_axis = 'z' num_states = 0 self.cfg["env"]["numObservations"] = self.num_obs_dict[self.obs_type] self.cfg["env"]["numStates"] = num_states self.cfg["env"]["numActions"] = 12 self.hist_buf_reset_env_ids = None super().__init__(config=self.cfg, sim_device=sim_device, rl_device=rl_device, graphics_device_id=graphics_device_id, headless=headless) self.dt = self.sim_params.dt control_freq_inv = 
self.cfg["env"].get("controlFrequencyInv", 1) if self.reset_time > 0.0: self.max_episode_length = int(round(self.reset_time / (control_freq_inv * self.dt))) print("Reset time: ", self.reset_time) print("New episode length: ", self.max_episode_length) if self.viewer != None: cam_pos = gymapi.Vec3(0.16, -0.5, 0.5) cam_target = gymapi.Vec3(0.0, 0.0, 0.15) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim) dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim) dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim) if self.obs_type == "full_state": sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim) self.vec_sensor_tensor = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, self.num_fingertips * 6) dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim) self.dof_force_tensor = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, self.num_dclaw_dofs) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) if self.cfg.env.dof_torque_on: self.gym.refresh_dof_force_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) self.dof_state = gymtorch.wrap_tensor(dof_state_tensor) self.dclaw_dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, :self.num_dclaw_dofs] self.dclaw_dof_pos = self.dclaw_dof_state[..., 0] self.dclaw_dof_vel = self.dclaw_dof_state[..., 1] if self.cfg.env.dof_torque_on: self.dclaw_dof_torque = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, -1) else: self.dclaw_dof_torque = None self.rigid_body_states = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13) self.num_bodies = self.rigid_body_states.shape[1] self.root_state_tensor = gymtorch.wrap_tensor(actor_root_state_tensor).view(-1, 13) if self.cfg.env.rew.pen_tb_contact: _net_cf = self.gym.acquire_net_contact_force_tensor(self.sim) self.net_contact_force = gymtorch.wrap_tensor(_net_cf).view(self.num_envs, -1, 3) table_handle = self.gym.find_actor_handle(self.envs[0], 'table') self.table_body_index = self.gym.find_actor_rigid_body_index(self.envs[0], table_handle, 'table', gymapi.DOMAIN_ENV) logger.warning(f'Table body index:{self.table_body_index}') self.table_contact_force = self.net_contact_force[:, self.table_body_index] self.num_dofs = self.gym.get_sim_dof_count(self.sim) // self.num_envs self.prev_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device) self.cur_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device) self.global_indices = torch.arange(self.num_envs * 3, dtype=torch.int32, device=self.device).view(self.num_envs, -1) self.reset_goal_buf = self.reset_buf.clone() self.successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.consecutive_successes = torch.zeros(1, dtype=torch.float, device=self.device) self.av_factor = to_torch(self.av_factor, dtype=torch.float, device=self.device) self.total_successes = 0 self.total_resets = 0 self.force_decay = to_torch(self.force_decay, dtype=torch.float, device=self.device) self.force_prob_range = to_torch(self.force_prob_range, dtype=torch.float, device=self.device) self.random_force_prob = torch.exp((torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1])) * torch.rand(self.num_envs, device=self.device) + torch.log( self.force_prob_range[1])) self.rb_forces = 
torch.zeros((self.num_envs, self.num_bodies, 3), dtype=torch.float, device=self.device) self.num_actions = self.num_dclaw_dofs self.actions = self.zero_actions() DClawBase.compute_observations(self) self.num_observations = self.obs_buf.shape[-1] self.cfg.env.numObservations = self.num_observations self.create_ob_act_space() def create_sim(self): self.dt = self.cfg["sim"]["dt"] self.up_axis_idx = self.set_sim_params_up_axis(self.sim_params, self.up_axis) self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) if self.randomize: self.apply_randomizations(self.randomization_params) def _create_ground_plane(self): plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) plane_params.distance = 0.1 self.gym.add_ground(self.sim, plane_params) def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = dexenv.LIB_PATH.joinpath('assets', 'dclaw').as_posix() object_asset_file = self.asset_files_dict[self.object_type] dclaw_asset, dclaw_dof_props = self.get_dclaw_asset(asset_root=asset_root) table_asset = self.get_table_asset() table_pose = self.get_table_pose() if self.obs_type == "full_state": sensor_pose = gymapi.Transform() for ft_handle in self.fingertip_handles: self.gym.create_asset_force_sensor(dclaw_asset, ft_handle, sensor_pose) if self.object_type in self.objs_in_isaacgym: asset_root = get_module_path('isaacgymenvs').parent.joinpath('assets').as_posix() else: asset_root = dexenv.LIB_PATH.joinpath('assets').as_posix() object_asset_options = gymapi.AssetOptions() if self.cfg.env.vhacd: object_asset_options.convex_decomposition_from_submeshes = True object_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options) object_asset_options.disable_gravity = True goal_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options) dclaw_start_pose = self.get_dclaw_start_pose() object_start_pose = self.get_object_start_pose(dclaw_start_pose) goal_start_pose = self.get_goal_object_start_pose(object_start_pose=object_start_pose) self.dclaws = [] self.envs = [] self.object_init_state = [] self.hand_start_states = [] self.hand_indices = [] self.fingertip_indices = [] self.object_indices = [] self.goal_object_indices = [] self.render_camera_handles = [] if self.cfg.rgb_render: render_cam_pose, render_cam_params = self.get_visual_render_camera_setup() self.fingertip_handles = [self.gym.find_asset_rigid_body_index(dclaw_asset, name) for name in self.fingertips] print(f'Fingertip handles:{self.fingertip_handles}') dclaw_rb_count = self.gym.get_asset_rigid_body_count(dclaw_asset) object_rb_count = self.gym.get_asset_rigid_body_count(object_asset) object_rs_count = self.gym.get_asset_rigid_shape_count(object_asset) self.object_rb_handles = list(range(dclaw_rb_count, dclaw_rb_count + object_rb_count)) self.object_handles = [] max_agg_bodies = self.num_dclaw_bodies + 2 * object_rb_count + 1 max_agg_shapes = self.num_dclaw_shapes + 2 * object_rs_count + 1 for i in range(self.num_envs): env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) if self.aggregate_mode >= 1: self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) self.create_hand_actor(env_ptr=env_ptr, dclaw_asset=dclaw_asset, dclaw_start_pose=dclaw_start_pose, 
dclaw_dof_props=dclaw_dof_props, env_id=i) object_handle = self.gym.create_actor(env_ptr, object_asset, object_start_pose, "object", i, 0, 1) self.object_handles.append(object_handle) self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z, object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z, object_start_pose.r.w, 0, 0, 0, 0, 0, 0]) object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM) self.object_indices.append(object_idx) goal_handle = self.gym.create_actor(env_ptr, goal_asset, goal_start_pose, "goal_object", i + self.num_envs, 0, 2) goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM) self.goal_object_indices.append(goal_object_idx) if self.cfg.env.blockscale is not None and self.cfg.env.objectType == 'block': blockscale = float(self.cfg.env.blockscale) self.gym.set_actor_scale(env_ptr, object_handle, blockscale) self.gym.set_actor_scale(env_ptr, goal_handle, blockscale) if self.object_type != "block": self.gym.set_rigid_body_color( env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98)) self.gym.set_rigid_body_color( env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98)) table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, "table", i, 0) if self.cfg.rgb_render: render_camera_handle = self.create_camera(render_cam_pose, env_ptr, render_cam_params) self.render_camera_handles.append(render_camera_handle[0]) if self.aggregate_mode > 0: self.gym.end_aggregate(env_ptr) self.envs.append(env_ptr) self.setup_torch_states() def create_camera(self, camera_poses, env_ptr, camera_params): cam_handles = [] for ic in range(min(len(camera_poses), self.cfg.cam.cam_num)): camera_handle = self.gym.create_camera_sensor(env_ptr, camera_params) if isinstance(camera_poses[ic], tuple): self.gym.set_camera_location(camera_handle, env_ptr, camera_poses[ic][0], camera_poses[ic][1]) else: self.gym.set_camera_transform(camera_handle, env_ptr, camera_poses[ic]) cam_handles.append(camera_handle) return cam_handles def get_visual_render_camera_setup(self): cam_pos = np.array([-0.7, 0, 0.5]) cam_focus_pt = np.array([0.08, 0, 0.15]) cam_focus_pt = gymapi.Vec3(*cam_focus_pt) cam_pos = gymapi.Vec3(*cam_pos) camera_poses = [(cam_pos, cam_focus_pt)] camera_params = get_camera_params(width=self.cfg.cam.visual_render_width, height=self.cfg.cam.visual_render_height, hov=45, cuda=False) return camera_poses, camera_params def create_hand_actor(self, env_ptr, dclaw_asset, dclaw_start_pose, dclaw_dof_props, env_id): dclaw_actor = self.gym.create_actor(env_ptr, dclaw_asset, dclaw_start_pose, "hand", env_id, 0, 0) if self.cfg.env.dof_torque_on: self.gym.enable_actor_dof_force_sensors(env_ptr, dclaw_actor) self.hand_start_states.append( [dclaw_start_pose.p.x, dclaw_start_pose.p.y, dclaw_start_pose.p.z, dclaw_start_pose.r.x, dclaw_start_pose.r.y, dclaw_start_pose.r.z, dclaw_start_pose.r.w, 0, 0, 0, 0, 0, 0]) self.gym.set_actor_dof_properties(env_ptr, dclaw_actor, dclaw_dof_props) hand_idx = self.gym.get_actor_index(env_ptr, dclaw_actor, gymapi.DOMAIN_SIM) self.hand_indices.append(hand_idx) self.gym.set_actor_dof_states(env_ptr, dclaw_actor, self.dclaw_default_dof_states, gymapi.STATE_ALL) if self.obs_type == "full_state": self.gym.enable_actor_dof_force_sensors(env_ptr, dclaw_actor) self.dclaws.append(dclaw_actor) self.set_hand_color(env_ptr, dclaw_actor) def set_hand_color(self, env_ptr, dclaw_actor): rgd_dict = self.gym.get_actor_rigid_body_dict(env_ptr, 
dclaw_actor) for bd, bd_id in rgd_dict.items(): if bd not in dclaw_body_color_mapping: continue color = gymapi.Vec3(*dclaw_body_color_mapping[bd]) self.gym.set_rigid_body_color(env_ptr, dclaw_actor, bd_id, gymapi.MESH_VISUAL, color) def get_table_asset(self): asset_options = gymapi.AssetOptions() asset_options.armature = 0.001 asset_options.fix_base_link = True asset_options.thickness = 0.001 asset_options.disable_gravity = True table_dims = gymapi.Vec3(0.6, 0.6, 0.1) table_asset = self.gym.create_box(self.sim, table_dims.x, table_dims.y, table_dims.z, asset_options) table_props = self.gym.get_asset_rigid_shape_properties(table_asset) for p in table_props: p.friction = self.cfg.env.table.friction p.torsion_friction = self.cfg.env.table.torsion_friction p.restitution = self.cfg.env.table.restitution p.rolling_friction = self.cfg.env.table.rolling_friction self.gym.set_asset_rigid_shape_properties(table_asset, table_props) return table_asset def get_table_pose(self): object_start_pose = gymapi.Transform() object_start_pose.p = gymapi.Vec3() object_start_pose.p.x = 0 object_start_pose.p.y = 0 object_start_pose.p.z = -0.05 return object_start_pose def get_dclaw_start_pose(self): dclaw_start_pose = gymapi.Transform() dclaw_start_pose.p = gymapi.Vec3(*get_axis_params(0.25, self.up_axis_idx)) dclaw_start_pose.r = gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 1, 0), np.pi) return dclaw_start_pose def setup_torch_states(self): self.render_rgb_obs_buf = None if self.cfg.rgb_render: self.gym.set_light_parameters(self.sim, 0, gymapi.Vec3(0.9, 0.9, 0.9), gymapi.Vec3(0.9, 0.9, 0.9), gymapi.Vec3(0, 0, 0)) else: self.gym.set_light_parameters(self.sim, 0, gymapi.Vec3(0.9, 0.9, 0.9), gymapi.Vec3(0.7, 0.7, 0.7), gymapi.Vec3(0, 0, 0)) self.object_init_state = to_torch(self.object_init_state, device=self.device, dtype=torch.float).view( self.num_envs, 13) self.goal_states = self.object_init_state.clone() self.goal_states[:, self.up_axis_idx] -= 0.04 self.goal_init_state = self.goal_states.clone() self.hand_start_states = to_torch(self.hand_start_states, device=self.device).view(self.num_envs, 13) self.fingertip_handles = to_torch(self.fingertip_handles, dtype=torch.long, device=self.device) self.object_rb_handles = to_torch(self.object_rb_handles, dtype=torch.long, device=self.device) self.object_rb_masses = None self.update_obj_mass() self.hand_indices = to_torch(self.hand_indices, dtype=torch.long, device=self.device) self.object_indices = to_torch(self.object_indices, dtype=torch.long, device=self.device) self.goal_object_indices = to_torch(self.goal_object_indices, dtype=torch.long, device=self.device) def get_dclaw_asset(self, asset_root=None, asset_options=None): # load dclaw asset if asset_options is None: asset_options = gymapi.AssetOptions() asset_options.flip_visual_attachments = False asset_options.fix_base_link = True asset_options.collapse_fixed_joints = False asset_options.disable_gravity = False asset_options.thickness = 0.001 asset_options.angular_damping = 0.01 asset_options.override_inertia = True asset_options.override_com = True logger.info(f'VHACD:{self.cfg.env.vhacd}') if self.cfg.env.vhacd: asset_options.convex_decomposition_from_submeshes = True if self.cfg.physics_engine == "physx": # if self.physics_engine == gymapi.SIM_PHYSX: asset_options.use_physx_armature = True asset_options.default_dof_drive_mode = gymapi.DOF_MODE_POS if asset_root is None: asset_root = dexenv.LIB_PATH.joinpath('assets', 'dclaw_4f').as_posix() robot_name = self.cfg.env.robot asset_root = 
pathlib_file(asset_root).parent.joinpath(f'{robot_name}').as_posix() dclaw_asset = self.gym.load_asset(self.sim, asset_root, f"{robot_name}.urdf", asset_options) print(f'Dclaw asset root:{asset_root} robot name:{robot_name}') self.num_dclaw_bodies = self.gym.get_asset_rigid_body_count(dclaw_asset) self.num_dclaw_shapes = self.gym.get_asset_rigid_shape_count(dclaw_asset) self.num_dclaw_dofs = self.gym.get_asset_dof_count(dclaw_asset) print(f'D-Claw:') print(f'\t Number of bodies: {self.num_dclaw_bodies}') print(f'\t Number of shapes: {self.num_dclaw_shapes}') print(f'\t Number of dofs: {self.num_dclaw_dofs}') self.dclaw_asset_dof_dict = self.gym.get_asset_dof_dict(dclaw_asset) joint_names = self.dclaw_asset_dof_dict.keys() logger.info(f'Joint names:{joint_names}') self.dof_joint_indices = list(self.dclaw_asset_dof_dict.values()) dinds = np.array(self.dof_joint_indices) assert np.all(np.diff(dinds) > 0) # check if it's in a sorted order (ascending) rb_links = self.gym.get_asset_rigid_body_names(dclaw_asset) self.fingertips = [x for x in rb_links if 'tip_link' in x] # ["one_tip_link", "two_tip_link", "three_tip_link"] self.num_fingertips = len(self.fingertips) print(f'Number of fingertips:{self.num_fingertips} Fingertips:{self.fingertips}') print(f'Actuator --- DoF Index') for act_name, act_index in zip(joint_names, self.dof_joint_indices): print(f'\t {act_name} {act_index}') dclaw_dof_props = self.gym.get_asset_dof_properties(dclaw_asset) def set_dof_prop(props, prop_name, val): if np.isscalar(val): props[prop_name].fill(val) elif len(val) == 3: props[prop_name] = np.array(list(val) * int(len(props[prop_name]) / 3)) else: props[prop_name] = np.array(val) if self.cfg["env"]["dof_vel_hard_limit"] is not None: vel_hard_limit = self.cfg["env"]["dof_vel_hard_limit"] if not self.cfg.env.soft_control else self.cfg["env"]["soft_dof_vel_hard_limit"] print(f'Setting DOF velocity limit to:{vel_hard_limit}') set_dof_prop(dclaw_dof_props, 'velocity', vel_hard_limit) if self.cfg["env"]["effort_limit"] is not None: effort_limit = self.cfg["env"]["effort_limit"] if not self.cfg.env.soft_control else self.cfg["env"]["soft_effort_limit"] print(f'Setting DOF effort limit to:{effort_limit}') set_dof_prop(dclaw_dof_props, 'effort', effort_limit) if self.cfg["env"]["stiffness"] is not None: stiffness = self.cfg["env"]["stiffness"] if not self.cfg.env.soft_control else self.cfg["env"]["soft_stiffness"] print(f'Setting stiffness to:{stiffness}') set_dof_prop(dclaw_dof_props, 'stiffness', stiffness) if self.cfg["env"]["damping"] is not None: damping = self.cfg["env"]["damping"] if not self.cfg.env.soft_control else self.cfg["env"]["soft_damping"] print(f'Setting damping to:{damping}') set_dof_prop(dclaw_dof_props, 'damping', damping) self.dclaw_dof_lower_limits = [] self.dclaw_dof_upper_limits = [] self.dclaw_default_dof_states = np.zeros(self.num_dclaw_dofs, dtype=gymapi.DofState.dtype) self.dclaw_default_dof_pos = self.dclaw_default_dof_states['pos'] self.dclaw_default_dof_vel = self.dclaw_default_dof_states['vel'] for i in range(self.num_dclaw_dofs): self.dclaw_dof_lower_limits.append(dclaw_dof_props['lower'][i]) self.dclaw_dof_upper_limits.append(dclaw_dof_props['upper'][i]) if i % 3 == 1: self.dclaw_default_dof_pos[i] = 0.8 elif i % 3 == 2: self.dclaw_default_dof_pos[i] = -1.1 else: self.dclaw_default_dof_pos[i] = 0. 
self.dclaw_default_dof_vel[i] = 0.0 self.dof_joint_indices = to_torch(self.dof_joint_indices, dtype=torch.long, device=self.device) self.dclaw_dof_lower_limits = to_torch(self.dclaw_dof_lower_limits, device=self.device) self.dclaw_dof_upper_limits = to_torch(self.dclaw_dof_upper_limits, device=self.device) self.dclaw_default_dof_pos = to_torch(self.dclaw_default_dof_pos, device=self.device) self.dclaw_default_dof_vel = to_torch(self.dclaw_default_dof_vel, device=self.device) self.fingertip_handles = [self.gym.find_asset_rigid_body_index(dclaw_asset, name) for name in self.fingertips] dclaw_asset_props = self.gym.get_asset_rigid_shape_properties(dclaw_asset) for p in dclaw_asset_props: p.friction = self.cfg.env.hand.friction p.torsion_friction = self.cfg.env.hand.torsion_friction p.rolling_friction = self.cfg.env.hand.rolling_friction p.restitution = self.cfg.env.hand.restitution self.gym.set_asset_rigid_shape_properties(dclaw_asset, dclaw_asset_props) return dclaw_asset, dclaw_dof_props def get_object_start_pose(self, dclaw_start_pose): object_start_pose = gymapi.Transform() object_start_pose.p = gymapi.Vec3() if self.cfg.env.obj_init_delta_pos is not None: delta_pos = self.cfg.env.obj_init_delta_pos object_start_pose.p.x = dclaw_start_pose.p.x + delta_pos[0] object_start_pose.p.y = dclaw_start_pose.p.y + delta_pos[1] object_start_pose.p.z = dclaw_start_pose.p.z + delta_pos[2] else: object_start_pose.p.x = dclaw_start_pose.p.x pose_dy, pose_dz = 0., -0.13 object_start_pose.p.y = dclaw_start_pose.p.y + pose_dy object_start_pose.p.z = dclaw_start_pose.p.z + pose_dz return object_start_pose def get_goal_object_start_pose(self, object_start_pose): self.goal_displacement = gymapi.Vec3(0., 0, 0.25) self.goal_displacement_tensor = to_torch( [self.goal_displacement.x, self.goal_displacement.y, self.goal_displacement.z], device=self.device) goal_start_pose = gymapi.Transform() goal_start_pose.p = object_start_pose.p + self.goal_displacement return goal_start_pose def set_dof_props(self, props_dict): param_setters_map = get_property_setter_map(self.gym) param_getters_map = get_property_getter_map(self.gym) prop_name = 'dof_properties' setter = param_setters_map[prop_name] for env_id in range(len(self.envs)): env = self.envs[env_id] handle = self.gym.find_actor_handle(env, 'hand') prop = param_getters_map[prop_name](env, handle) for dof_prop_name, dof_prop_values in props_dict.items(): if env_id == 0: assert len(dof_prop_values) == len(self.envs) prop_val = dof_prop_values[env_id] prop[dof_prop_name].fill(prop_val) success = setter(env, handle, prop) if not success: logger.warning(f'Setting dof properties is not successful!') def update_obj_mass(self, env_ids=None): object_rb_masses = [] env_pool = env_ids if env_ids is not None else list(range(self.num_envs)) if len(env_pool) < 1: return for env_id, object_handle in zip(env_pool, self.object_handles): env_ptr = self.envs[env_id] object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle) object_rb_masses.append([prop.mass for prop in object_rb_props]) if self.object_rb_masses is None: self.object_rb_masses = to_torch(object_rb_masses, dtype=torch.float, device=self.device) else: self.object_rb_masses[env_pool] = to_torch(object_rb_masses, dtype=torch.float, device=self.device) def reset(self) -> torch.Tensor: """Reset the environment. 
Returns: Observation dictionary """ zero_actions = self.zero_actions() self.reset_buf.fill_(1) self.reset_goal_buf.fill_(1) if self.cfg.env.action_ema is not None: self.action_ema_val = zero_actions.clone() # step the simulator self.step(zero_actions) return self.update_obs() def compute_reward(self, actions):
res = compute_dclaw_reward(
1
2023-10-25 17:22:41+00:00
16k
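The D-Claw environment code in the row above drives every joint through a small helper that broadcasts either a scalar or a per-joint triple across all DOFs, and fills the default pose by position within each finger (i % 3). Below is a minimal NumPy sketch of just that pattern, outside Isaac Gym: the plain dict stands in for the gymapi DOF-properties array, and the 9-DOF count is invented for illustration (the real count comes from the loaded asset); only the 0.8 / -1.1 default angles are taken from the snippet.

import numpy as np

def set_dof_prop(props, prop_name, val):
    # Mirrors the helper above: a scalar fills every DOF, a 3-tuple is tiled per finger,
    # anything else is taken as an explicit per-DOF array.
    if np.isscalar(val):
        props[prop_name].fill(val)
    elif len(val) == 3:
        props[prop_name] = np.array(list(val) * (len(props[prop_name]) // 3))
    else:
        props[prop_name] = np.array(val)

num_dofs = 9  # e.g. 3 fingers x 3 joints; illustrative only
props = {"stiffness": np.zeros(num_dofs), "damping": np.zeros(num_dofs)}
set_dof_prop(props, "stiffness", 3.0)            # scalar -> every DOF
set_dof_prop(props, "damping", (0.1, 0.2, 0.2))  # per-joint triple -> repeated per finger

# Default pose pattern from the snippet: joint 1 of each finger at 0.8 rad,
# joint 2 at -1.1 rad, joint 0 at 0.
default_pos = np.array([0.8 if i % 3 == 1 else (-1.1 if i % 3 == 2 else 0.0)
                        for i in range(num_dofs)])
print(props["damping"], default_pos)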
CVHub520/yolov5_obb
val.py
[ { "identifier": "poly2hbb", "path": "utils/rboxs_utils.py", "snippet": "def poly2hbb(polys):\n \"\"\"\n Trans poly format to hbb format\n Args:\n rboxes (array/tensor): (num_gts, poly) \n\n Returns:\n hbboxes (array/tensor): (num_gts, [xc yc w h]) \n \"\"\"\n assert polys.shape[-1] == 8\n if isinstance(polys, torch.Tensor):\n x = polys[:, 0::2] # (num, 4) \n y = polys[:, 1::2]\n x_max = torch.amax(x, dim=1) # (num)\n x_min = torch.amin(x, dim=1)\n y_max = torch.amax(y, dim=1)\n y_min = torch.amin(y, dim=1)\n x_ctr, y_ctr = (x_max + x_min) / 2.0, (y_max + y_min) / 2.0 # (num)\n h = y_max - y_min # (num)\n w = x_max - x_min\n x_ctr, y_ctr, w, h = x_ctr.reshape(-1, 1), y_ctr.reshape(-1, 1), w.reshape(-1, 1), h.reshape(-1, 1) # (num, 1)\n hbboxes = torch.cat((x_ctr, y_ctr, w, h), dim=1)\n else:\n x = polys[:, 0::2] # (num, 4) \n y = polys[:, 1::2]\n x_max = np.amax(x, axis=1) # (num)\n x_min = np.amin(x, axis=1) \n y_max = np.amax(y, axis=1)\n y_min = np.amin(y, axis=1)\n x_ctr, y_ctr = (x_max + x_min) / 2.0, (y_max + y_min) / 2.0 # (num)\n h = y_max - y_min # (num)\n w = x_max - x_min\n x_ctr, y_ctr, w, h = x_ctr.reshape(-1, 1), y_ctr.reshape(-1, 1), w.reshape(-1, 1), h.reshape(-1, 1) # (num, 1)\n hbboxes = np.concatenate((x_ctr, y_ctr, w, h), axis=1)\n return hbboxes" }, { "identifier": "rbox2poly", "path": "utils/rboxs_utils.py", "snippet": "def rbox2poly(obboxes):\n \"\"\"\n Trans rbox format to poly format.\n Args:\n rboxes (array/tensor): (num_gts, [cx cy l s θ]) θ∈[-pi/2, pi/2)\n\n Returns:\n polys (array/tensor): (num_gts, [x1 y1 x2 y2 x3 y3 x4 y4]) \n \"\"\"\n if isinstance(obboxes, torch.Tensor):\n center, w, h, theta = obboxes[:, :2], obboxes[:, 2:3], obboxes[:, 3:4], obboxes[:, 4:5]\n Cos, Sin = torch.cos(theta), torch.sin(theta)\n\n vector1 = torch.cat(\n (w/2 * Cos, -w/2 * Sin), dim=-1)\n vector2 = torch.cat(\n (-h/2 * Sin, -h/2 * Cos), dim=-1)\n point1 = center + vector1 + vector2\n point2 = center + vector1 - vector2\n point3 = center - vector1 - vector2\n point4 = center - vector1 + vector2\n order = obboxes.shape[:-1]\n return torch.cat(\n (point1, point2, point3, point4), dim=-1).reshape(*order, 8)\n else:\n center, w, h, theta = np.split(obboxes, (2, 3, 4), axis=-1)\n Cos, Sin = np.cos(theta), np.sin(theta)\n\n vector1 = np.concatenate(\n [w/2 * Cos, -w/2 * Sin], axis=-1)\n vector2 = np.concatenate(\n [-h/2 * Sin, -h/2 * Cos], axis=-1)\n\n point1 = center + vector1 + vector2\n point2 = center + vector1 - vector2\n point3 = center - vector1 - vector2\n point4 = center - vector1 + vector2\n order = obboxes.shape[:-1]\n return np.concatenate(\n [point1, point2, point3, point4], axis=-1).reshape(*order, 8)" }, { "identifier": "DetectMultiBackend", "path": "models/common.py", "snippet": "class DetectMultiBackend(nn.Module):\n # YOLOv5 MultiBackend class for python inference on various backends\n def __init__(self, weights='yolov5s.pt', device=None, dnn=False):\n # Usage:\n # PyTorch: weights = *.pt\n # TorchScript: *.torchscript\n # CoreML: *.mlmodel\n # TensorFlow: *_saved_model\n # TensorFlow: *.pb\n # TensorFlow Lite: *.tflite\n # ONNX Runtime: *.onnx\n # OpenCV DNN: *.onnx with dnn=True\n # TensorRT: *.engine\n from models.experimental import attempt_download, attempt_load # scoped to avoid circular import\n\n super().__init__()\n w = str(weights[0] if isinstance(weights, list) else weights)\n suffix = Path(w).suffix.lower()\n suffixes = ['.pt', '.torchscript', '.onnx', '.engine', '.tflite', '.pb', '', '.mlmodel']\n check_suffix(w, suffixes) # check weights have acceptable 
suffix\n pt, jit, onnx, engine, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes) # backend booleans\n stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults\n w = attempt_download(w) # download if not local\n\n if jit: # TorchScript\n LOGGER.info(f'Loading {w} for TorchScript inference...')\n extra_files = {'config.txt': ''} # model metadata\n model = torch.jit.load(w, _extra_files=extra_files)\n if extra_files['config.txt']:\n d = json.loads(extra_files['config.txt']) # extra_files dict\n stride, names = int(d['stride']), d['names']\n elif pt: # PyTorch\n model = attempt_load(weights if isinstance(weights, list) else w, map_location=device)\n stride = int(model.stride.max()) # model stride\n names = model.module.names if hasattr(model, 'module') else model.names # get class names\n self.model = model # explicitly assign for to(), cpu(), cuda(), half()\n elif coreml: # CoreML\n LOGGER.info(f'Loading {w} for CoreML inference...')\n import coremltools as ct\n model = ct.models.MLModel(w)\n elif dnn: # ONNX OpenCV DNN\n LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')\n check_requirements(('opencv-python>=4.5.4',))\n net = cv2.dnn.readNetFromONNX(w)\n elif onnx: # ONNX Runtime\n LOGGER.info(f'Loading {w} for ONNX Runtime inference...')\n cuda = torch.cuda.is_available()\n check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))\n import onnxruntime\n providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']\n session = onnxruntime.InferenceSession(w, providers=providers)\n elif engine: # TensorRT\n LOGGER.info(f'Loading {w} for TensorRT inference...')\n import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download\n check_version(trt.__version__, '8.0.0', verbose=True) # version requirement\n Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))\n logger = trt.Logger(trt.Logger.INFO)\n with open(w, 'rb') as f, trt.Runtime(logger) as runtime:\n model = runtime.deserialize_cuda_engine(f.read())\n bindings = OrderedDict()\n for index in range(model.num_bindings):\n name = model.get_binding_name(index)\n dtype = trt.nptype(model.get_binding_dtype(index))\n shape = tuple(model.get_binding_shape(index))\n data = torch.from_numpy(np.empty(shape, dtype=np.dtype(dtype))).to(device)\n bindings[name] = Binding(name, dtype, shape, data, int(data.data_ptr()))\n binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())\n context = model.create_execution_context()\n batch_size = bindings['images'].shape[0]\n else: # TensorFlow model (TFLite, pb, saved_model)\n if pb: # https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt\n LOGGER.info(f'Loading {w} for TensorFlow *.pb inference...')\n import tensorflow as tf\n\n def wrap_frozen_graph(gd, inputs, outputs):\n x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=\"\"), []) # wrapped\n return x.prune(tf.nest.map_structure(x.graph.as_graph_element, inputs),\n tf.nest.map_structure(x.graph.as_graph_element, outputs))\n\n graph_def = tf.Graph().as_graph_def()\n graph_def.ParseFromString(open(w, 'rb').read())\n frozen_func = wrap_frozen_graph(gd=graph_def, inputs=\"x:0\", outputs=\"Identity:0\")\n elif saved_model:\n LOGGER.info(f'Loading {w} for TensorFlow saved_model inference...')\n import tensorflow as tf\n model = tf.keras.models.load_model(w)\n elif tflite: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python\n if 'edgetpu' in w.lower():\n 
LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')\n import tflite_runtime.interpreter as tfli\n delegate = {'Linux': 'libedgetpu.so.1', # install https://coral.ai/software/#edgetpu-runtime\n 'Darwin': 'libedgetpu.1.dylib',\n 'Windows': 'edgetpu.dll'}[platform.system()]\n interpreter = tfli.Interpreter(model_path=w, experimental_delegates=[tfli.load_delegate(delegate)])\n else:\n LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')\n import tensorflow as tf\n interpreter = tf.lite.Interpreter(model_path=w) # load TFLite model\n interpreter.allocate_tensors() # allocate\n input_details = interpreter.get_input_details() # inputs\n output_details = interpreter.get_output_details() # outputs\n self.__dict__.update(locals()) # assign all variables to self\n\n def forward(self, im, augment=False, visualize=False, val=False):\n # YOLOv5 MultiBackend inference\n b, ch, h, w = im.shape # batch, channel, height, width\n if self.pt or self.jit: # PyTorch\n y = self.model(im) if self.jit else self.model(im, augment=augment, visualize=visualize)\n return y if val else y[0]\n elif self.coreml: # CoreML\n im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3)\n im = Image.fromarray((im[0] * 255).astype('uint8'))\n # im = im.resize((192, 320), Image.ANTIALIAS)\n y = self.model.predict({'image': im}) # coordinates are xywh normalized\n box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels\n conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float)\n y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)\n elif self.onnx: # ONNX\n im = im.cpu().numpy() # torch to numpy\n if self.dnn: # ONNX OpenCV DNN\n self.net.setInput(im)\n y = self.net.forward()\n else: # ONNX Runtime\n y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0]\n elif self.engine: # TensorRT\n assert im.shape == self.bindings['images'].shape, (im.shape, self.bindings['images'].shape)\n self.binding_addrs['images'] = int(im.data_ptr())\n self.context.execute_v2(list(self.binding_addrs.values()))\n y = self.bindings['output'].data\n else: # TensorFlow model (TFLite, pb, saved_model)\n im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3)\n if self.pb:\n y = self.frozen_func(x=self.tf.constant(im)).numpy()\n elif self.saved_model:\n y = self.model(im, training=False).numpy()\n elif self.tflite:\n input, output = self.input_details[0], self.output_details[0]\n int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model\n if int8:\n scale, zero_point = input['quantization']\n im = (im / scale + zero_point).astype(np.uint8) # de-scale\n self.interpreter.set_tensor(input['index'], im)\n self.interpreter.invoke()\n y = self.interpreter.get_tensor(output['index'])\n if int8:\n scale, zero_point = output['quantization']\n y = (y.astype(np.float32) - zero_point) * scale # re-scale\n y[..., 0] *= w # x\n y[..., 1] *= h # y\n y[..., 2] *= w # w\n y[..., 3] *= h # h\n y = torch.tensor(y) if isinstance(y, np.ndarray) else y\n return (y, []) if val else y\n\n def warmup(self, imgsz=(1, 3, 640, 640), half=False):\n # Warmup model by running inference once\n if self.pt or self.engine or self.onnx: # warmup types\n if isinstance(self.device, torch.device) and self.device.type != 'cpu': # only warmup GPU models\n im = torch.zeros(*imgsz).to(self.device).type(torch.half if half else torch.float) # input image\n self.forward(im) # warmup" }, { "identifier": 
"Callbacks", "path": "utils/callbacks.py", "snippet": "class Callbacks:\n \"\"\"\"\n Handles all registered callbacks for YOLOv5 Hooks\n \"\"\"\n\n def __init__(self):\n # Define the available callbacks\n self._callbacks = {\n 'on_pretrain_routine_start': [],\n 'on_pretrain_routine_end': [],\n\n 'on_train_start': [],\n 'on_train_epoch_start': [],\n 'on_train_batch_start': [],\n 'optimizer_step': [],\n 'on_before_zero_grad': [],\n 'on_train_batch_end': [],\n 'on_train_epoch_end': [],\n\n 'on_val_start': [],\n 'on_val_batch_start': [],\n 'on_val_image_end': [],\n 'on_val_batch_end': [],\n 'on_val_end': [],\n\n 'on_fit_epoch_end': [], # fit = train + val\n 'on_model_save': [],\n 'on_train_end': [],\n 'on_params_update': [],\n 'teardown': [],\n }\n\n def register_action(self, hook, name='', callback=None):\n \"\"\"\n Register a new action to a callback hook\n\n Args:\n hook The callback hook name to register the action to\n name The name of the action for later reference\n callback The callback to fire\n \"\"\"\n assert hook in self._callbacks, f\"hook '{hook}' not found in callbacks {self._callbacks}\"\n assert callable(callback), f\"callback '{callback}' is not callable\"\n self._callbacks[hook].append({'name': name, 'callback': callback})\n\n def get_registered_actions(self, hook=None):\n \"\"\"\"\n Returns all the registered actions by callback hook\n\n Args:\n hook The name of the hook to check, defaults to all\n \"\"\"\n if hook:\n return self._callbacks[hook]\n else:\n return self._callbacks\n\n def run(self, hook, *args, **kwargs):\n \"\"\"\n Loop through the registered actions and fire all callbacks\n\n Args:\n hook The name of the hook to check, defaults to all\n args Arguments to receive from YOLOv5\n kwargs Keyword Arguments to receive from YOLOv5\n \"\"\"\n\n assert hook in self._callbacks, f\"hook '{hook}' not found in callbacks {self._callbacks}\"\n\n for logger in self._callbacks[hook]:\n logger['callback'](*args, **kwargs)" }, { "identifier": "create_dataloader", "path": "utils/datasets.py", "snippet": "def create_dataloader(path, imgsz, batch_size, stride, names, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0,\n rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix='', shuffle=False):\n if rect and shuffle:\n LOGGER.warning('WARNING: --rect is incompatible with DataLoader shuffle, setting shuffle=False')\n shuffle = False\n with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP\n dataset = LoadImagesAndLabels(path, names, imgsz, batch_size,\n augment=augment, # augmentation\n hyp=hyp, # hyperparameters\n rect=rect, # rectangular batches\n cache_images=cache,\n single_cls=single_cls,\n stride=int(stride),\n pad=pad,\n image_weights=image_weights,\n prefix=prefix)\n\n batch_size = min(batch_size, len(dataset))\n nw = min([os.cpu_count() // WORLD_SIZE, batch_size if batch_size > 1 else 0, workers]) # number of workers\n sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)\n loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates\n return loader(dataset,\n batch_size=batch_size,\n shuffle=shuffle and sampler is None,\n num_workers=nw,\n sampler=sampler,\n pin_memory=True,\n collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn), dataset" }, { "identifier": "LOGGER", "path": "utils/general.py", "snippet": "FILE = Path(__file__).resolve()\nROOT = FILE.parents[1] # YOLOv5 root directory\nNUM_THREADS = 
min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads\nLOGGER = set_logging(__name__) # define globally (used in train.py, val.py, detect.py, etc.)\nNCOLS = 0 if is_docker() else shutil.get_terminal_size().columns # terminal window size for tqdm\ndef set_logging(name=None, verbose=True):\n def __enter__(self):\n def __exit__(self, type, value, traceback):\n def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True):\n def _timeout_handler(self, signum, frame):\n def __enter__(self):\n def __exit__(self, exc_type, exc_val, exc_tb):\n def __init__(self, new_dir):\n def __enter__(self):\n def __exit__(self, exc_type, exc_val, exc_tb):\ndef try_except(func):\n def handler(*args, **kwargs):\ndef methods(instance):\ndef print_args(name, opt):\ndef init_seeds(seed=0):\ndef intersect_dicts(da, db, exclude=()):\ndef get_latest_run(search_dir='.'):\ndef user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'):\ndef is_writeable(dir, test=False):\ndef is_docker():\ndef is_colab():\ndef is_pip():\ndef is_ascii(s=''):\ndef is_chinese(s='人工智能'):\ndef emojis(str=''):\ndef file_size(path):\ndef check_online():\ndef check_git_status():\ndef check_python(minimum='3.6.2'):\ndef check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False):\ndef check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True):\ndef check_img_size(imgsz, s=32, floor=0):\ndef check_imshow():\ndef check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''):\ndef check_yaml(file, suffix=('.yaml', '.yml')):\ndef check_file(file, suffix=''):\ndef check_dataset(data, autodownload=True):\ndef url2file(url):\ndef download(url, dir='.', unzip=True, delete=True, curl=False, threads=1):\n def download_one(url, dir):\ndef make_divisible(x, divisor):\ndef clean_str(s):\ndef one_cycle(y1=0.0, y2=1.0, steps=100):\ndef colorstr(*input):\ndef labels_to_class_weights(labels, nc=80):\ndef labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):\ndef coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)\ndef xyxy2xywh(x):\ndef xywh2xyxy(x):\ndef xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):\ndef xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):\ndef xyn2xy(x, w=640, h=640, padw=0, padh=0):\ndef segment2box(segment, width=640, height=640):\ndef segments2boxes(segments):\ndef resample_segments(segments, n=1000):\ndef scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):\ndef scale_polys(img1_shape, polys, img0_shape, ratio_pad=None):\ndef clip_polys(polys, shape):\ndef clip_coords(boxes, shape):\ndef non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,\n labels=(), max_det=300):\ndef non_max_suppression_obb(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,\n labels=(), max_det=1500):\ndef strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()\ndef print_mutation(results, hyp, save_dir, bucket):\ndef apply_classifier(x, model, img, im0):\ndef increment_path(path, exist_ok=False, sep='', mkdir=False):\nclass Profile(contextlib.ContextDecorator):\nclass Timeout(contextlib.ContextDecorator):\nclass WorkingDirectory(contextlib.ContextDecorator):" }, { "identifier": "ConfusionMatrix", "path": "utils/metrics.py", "snippet": "class ConfusionMatrix:\n # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix\n def __init__(self, nc, conf=0.25, 
iou_thres=0.45):\n self.matrix = np.zeros((nc + 1, nc + 1))\n self.nc = nc # number of classes\n self.conf = conf\n self.iou_thres = iou_thres\n\n def process_batch(self, detections, labels):\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n Arguments:\n detections (Array[N, 6]), x1, y1, x2, y2, conf, class\n labels (Array[M, 5]), class, x1, y1, x2, y2\n Returns:\n None, updates confusion matrix accordingly\n \"\"\"\n detections = detections[detections[:, 4] > self.conf]\n gt_classes = labels[:, 0].int()\n detection_classes = detections[:, 5].int()\n iou = box_iou(labels[:, 1:], detections[:, :4])\n\n x = torch.where(iou > self.iou_thres)\n if x[0].shape[0]:\n matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()\n if x[0].shape[0] > 1:\n matches = matches[matches[:, 2].argsort()[::-1]]\n matches = matches[np.unique(matches[:, 1], return_index=True)[1]]\n matches = matches[matches[:, 2].argsort()[::-1]]\n matches = matches[np.unique(matches[:, 0], return_index=True)[1]]\n else:\n matches = np.zeros((0, 3))\n\n n = matches.shape[0] > 0\n m0, m1, _ = matches.transpose().astype(np.int16)\n for i, gc in enumerate(gt_classes):\n j = m0 == i\n if n and sum(j) == 1:\n self.matrix[detection_classes[m1[j]], gc] += 1 # correct\n else:\n self.matrix[self.nc, gc] += 1 # background FP\n\n if n:\n for i, dc in enumerate(detection_classes):\n if not any(m1 == i):\n self.matrix[dc, self.nc] += 1 # background FN\n\n def matrix(self):\n return self.matrix\n\n def tp_fp(self):\n tp = self.matrix.diagonal() # true positives\n fp = self.matrix.sum(1) - tp # false positives\n # fn = self.matrix.sum(0) - tp # false negatives (missed detections)\n return tp[:-1], fp[:-1] # remove background class\n\n def plot(self, normalize=True, save_dir='', names=()):\n try:\n import seaborn as sn\n\n array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-6) if normalize else 1) # normalize columns\n array[array < 0.005] = np.nan # don't annotate (would appear as 0.00)\n\n fig = plt.figure(figsize=(12, 9), tight_layout=True)\n sn.set(font_scale=1.0 if self.nc < 50 else 0.8) # for label size\n labels = (0 < len(names) < 99) and len(names) == self.nc # apply names to ticklabels\n with warnings.catch_warnings():\n warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered\n sn.heatmap(array, annot=self.nc < 30, annot_kws={\"size\": 8}, cmap='Blues', fmt='.2f', square=True,\n xticklabels=names + ['background FP'] if labels else \"auto\",\n yticklabels=names + ['background FN'] if labels else \"auto\").set_facecolor((1, 1, 1))\n fig.axes[0].set_xlabel('True')\n fig.axes[0].set_ylabel('Predicted')\n fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)\n plt.close()\n except Exception as e:\n print(f'WARNING: ConfusionMatrix plot failure: {e}')\n\n def print(self):\n for i in range(self.nc + 1):\n print(' '.join(map(str, self.matrix[i])))" }, { "identifier": "ap_per_class", "path": "utils/metrics.py", "snippet": "def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16):\n \"\"\" Compute the average precision, given the recall and precision curves.\n Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.\n # Arguments\n tp: True positives (nparray, nx1 or nx10).\n conf: Objectness value from 0-1 (nparray).\n pred_cls: Predicted object classes (nparray).\n target_cls: True object classes (nparray).\n plot: Plot 
precision-recall curve at [email protected]\n save_dir: Plot save directory\n # Returns\n The average precision as computed in py-faster-rcnn.\n \"\"\"\n\n # Sort by objectness\n i = np.argsort(-conf)\n tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]\n\n # Find unique classes\n unique_classes, nt = np.unique(target_cls, return_counts=True)\n nc = unique_classes.shape[0] # number of classes, number of detections\n\n # Create Precision-Recall curve and compute AP for each class\n px, py = np.linspace(0, 1, 1000), [] # for plotting\n ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000))\n for ci, c in enumerate(unique_classes):\n i = pred_cls == c\n n_l = nt[ci] # number of labels\n n_p = i.sum() # number of predictions\n\n if n_p == 0 or n_l == 0:\n continue\n else:\n # Accumulate FPs and TPs\n fpc = (1 - tp[i]).cumsum(0)\n tpc = tp[i].cumsum(0)\n\n # Recall\n recall = tpc / (n_l + eps) # recall curve\n r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases\n\n # Precision\n precision = tpc / (tpc + fpc) # precision curve\n p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score\n\n # AP from recall-precision curve\n for j in range(tp.shape[1]):\n ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])\n if plot and j == 0:\n py.append(np.interp(px, mrec, mpre)) # precision at [email protected]\n\n # Compute F1 (harmonic mean of precision and recall)\n f1 = 2 * p * r / (p + r + eps)\n names = [v for k, v in names.items() if k in unique_classes] # list: only classes that have data\n names = {i: v for i, v in enumerate(names)} # to dict\n if plot:\n plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names)\n plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1')\n plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision')\n plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall')\n\n i = f1.mean(0).argmax() # max F1 index\n p, r, f1 = p[:, i], r[:, i], f1[:, i]\n tp = (r * nt).round() # true positives\n fp = (tp / (p + eps) - tp).round() # false positives\n return tp, fp, p, r, f1, ap, unique_classes.astype('int32')" }, { "identifier": "output_to_target", "path": "utils/plots.py", "snippet": "def output_to_target(output): #list*(n, [xylsθ, conf, cls]) θ ∈ [-pi/2, pi/2)\n # Convert model output to target format [batch_id, class_id, x, y, l, s, theta, conf]\n targets = []\n for i, o in enumerate(output):\n for *rbox, conf, cls in o.cpu().numpy():\n targets.append([i, cls, *list(*(np.array(rbox)[None])), conf])\n return np.array(targets)" }, { "identifier": "plot_images", "path": "utils/plots.py", "snippet": "def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=2048, max_subplots=4):\n \"\"\"\n Args:\n imgs (tensor): (b, 3, height, width)\n targets_train (tensor): (n_targets, [batch_id clsid cx cy l s theta gaussian_θ_labels]) θ∈[-pi/2, pi/2)\n targets_pred (array): (n, [batch_id, class_id, cx, cy, l, s, theta, conf]) θ∈[-pi/2, pi/2)\n paths (list[str,...]): (b)\n fname (str): (1) \n names :\n\n \"\"\"\n # Plot image grid with labels\n if isinstance(images, torch.Tensor):\n images = images.cpu().float().numpy()\n if isinstance(targets, torch.Tensor):\n targets = targets.cpu().numpy()\n if np.max(images[0]) <= 1:\n images *= 255 # de-normalise (optional)\n bs, _, h, w = images.shape # batch size, _, height, width\n bs = min(bs, max_subplots) # limit plot images\n ns = np.ceil(bs ** 0.5) # number of 
subplots (square)\n\n # Build Image\n mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init\n for i, im in enumerate(images):\n if i == max_subplots: # if last batch has fewer images than we expect\n break\n x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin\n im = im.transpose(1, 2, 0)\n mosaic[y:y + h, x:x + w, :] = im\n\n # Resize (optional)\n scale = max_size / ns / max(h, w)\n if scale < 1:\n h = math.ceil(scale * h)\n w = math.ceil(scale * w)\n mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h)))\n\n # Annotate\n fs = int((h + w) * ns * 0.01) # font size\n annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True)\n for i in range(i + 1):\n x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin\n annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders\n if paths:\n annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames\n if len(targets) > 0:\n ti = targets[targets[:, 0] == i] # image targets, (n, [img_index clsid cx cy l s theta gaussian_θ_labels])\n # boxes = xywh2xyxy(ti[:, 2:6]).T\n rboxes = ti[:, 2:7]\n classes = ti[:, 1].astype('int')\n # labels = ti.shape[1] == 6 # labels if no conf column\n labels = ti.shape[1] == 187 # labels if no conf column\n # conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred)\n conf = None if labels else ti[:, 7] # check for confidence presence (label vs pred)\n\n # if boxes.shape[1]:\n # if boxes.max() <= 1.01: # if normalized with tolerance 0.01\n # boxes[[0, 2]] *= w # scale to pixels\n # boxes[[1, 3]] *= h\n # elif scale < 1: # absolute coords need scale if image scales\n # boxes *= scale\n polys = rbox2poly(rboxes)\n if scale < 1:\n polys *= scale\n # boxes[[0, 2]] += x\n # boxes[[1, 3]] += y\n polys[:, [0, 2, 4, 6]] += x\n polys[:, [1, 3, 5, 7]] += y\n # for j, box in enumerate(boxes.T.tolist()):\n # cls = classes[j]\n # color = colors(cls)\n # cls = names[cls] if names else cls\n # if labels or conf[j] > 0.25: # 0.25 conf thresh\n # label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}'\n # annotator.box_label(box, label, color=color)\n for j, poly in enumerate(polys.tolist()):\n cls = classes[j]\n color = colors(cls)\n cls = names[cls] if names else cls\n if labels or conf[j] > 0.25: # 0.25 conf thresh\n label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}' \n annotator.poly_label(poly, label, color=color)\n annotator.im.save(fname) # save" }, { "identifier": "plot_val_study", "path": "utils/plots.py", "snippet": "def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study()\n # Plot file=study.txt generated by val.py (or plot all study*.txt in dir)\n save_dir = Path(file).parent if file else Path(dir)\n plot2 = False # plot additional results\n if plot2:\n ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel()\n\n fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)\n # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]:\n for f in sorted(save_dir.glob('study*.txt')):\n y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T\n x = np.arange(y.shape[1]) if x is None else np.array(x)\n if plot2:\n s = ['P', 'R', '[email protected]', '[email protected]:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)']\n for i in range(7):\n ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)\n ax[i].set_title(s[i])\n\n j = 
y[3].argmax() + 1\n ax2.plot(y[5, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8,\n label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))\n\n ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],\n 'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')\n\n ax2.grid(alpha=0.2)\n ax2.set_yticks(np.arange(20, 60, 5))\n ax2.set_xlim(0, 57)\n ax2.set_ylim(25, 55)\n ax2.set_xlabel('GPU Speed (ms/img)')\n ax2.set_ylabel('COCO AP val')\n ax2.legend(loc='lower right')\n f = save_dir / 'study.png'\n print(f'Saving {f}...')\n plt.savefig(f, dpi=300)" }, { "identifier": "select_device", "path": "utils/torch_utils.py", "snippet": "def select_device(device='', batch_size=0, newline=True):\n # device = 'cpu' or '0' or '0,1,2,3'\n s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string\n device = str(device).strip().lower().replace('cuda:', '') # to string, 'cuda:0' to '0'\n cpu = device == 'cpu'\n if cpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable\n assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability\n\n cuda = not cpu and torch.cuda.is_available()\n if cuda:\n devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7\n n = len(devices) # device count\n if n > 1 and batch_size > 0: # check batch_size is divisible by device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * (len(s) + 1)\n for i, d in enumerate(devices):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2:.0f}MiB)\\n\" # bytes to MB\n else:\n s += 'CPU\\n'\n\n if not newline:\n s = s.rstrip()\n LOGGER.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe\n return torch.device('cuda:0' if cuda else 'cpu')" }, { "identifier": "time_sync", "path": "utils/torch_utils.py", "snippet": "def time_sync():\n # pytorch-accurate time\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n return time.time()" } ]
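The rbox2poly and poly2hbb snippets above define the two box conversions this validator leans on: rotated (cx, cy, l, s, theta) boxes to corner polygons, and polygons to their horizontal bounding boxes. The following is a self-contained NumPy sketch of that round trip, re-implementing only the math shown in the snippets (it is not an import from the repo), with a made-up example box:

import numpy as np

def rbox2poly(obb):
    # (cx, cy, l, s, theta) -> 4 corner points (x1 y1 ... x4 y4), same math as the snippet
    center, w, h, theta = obb[:, :2], obb[:, 2:3], obb[:, 3:4], obb[:, 4:5]
    Cos, Sin = np.cos(theta), np.sin(theta)
    v1 = np.concatenate([w / 2 * Cos, -w / 2 * Sin], axis=-1)
    v2 = np.concatenate([-h / 2 * Sin, -h / 2 * Cos], axis=-1)
    return np.concatenate([center + v1 + v2, center + v1 - v2,
                           center - v1 - v2, center - v1 + v2], axis=-1)

def poly2hbb(polys):
    # 4 corner points -> horizontal (xc, yc, w, h), same math as the snippet
    x, y = polys[:, 0::2], polys[:, 1::2]
    xc, yc = (x.max(1) + x.min(1)) / 2, (y.max(1) + y.min(1)) / 2
    return np.stack([xc, yc, x.max(1) - x.min(1), y.max(1) - y.min(1)], axis=1)

rbox = np.array([[100.0, 50.0, 40.0, 20.0, np.pi / 6]])  # cx cy l s theta, theta in [-pi/2, pi/2)
poly = rbox2poly(rbox)   # (1, 8) corner coordinates
hbb = poly2hbb(poly)     # (1, 4) enclosing axis-aligned box
print(poly.round(1), hbb.round(1))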
import argparse import json import os import sys import numpy as np import torch from pathlib import Path from threading import Thread from tqdm import tqdm from utils.rboxs_utils import poly2hbb, rbox2poly from models.common import DetectMultiBackend from utils.callbacks import Callbacks from utils.datasets import create_dataloader from utils.general import (LOGGER, box_iou, check_dataset, check_img_size, check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args, scale_coords, scale_polys, xywh2xyxy, xyxy2xywh, non_max_suppression_obb) from utils.metrics import ConfusionMatrix, ap_per_class from utils.plots import output_to_target, plot_images, plot_val_study from utils.torch_utils import select_device, time_sync from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval
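Among the context snippets above, ap_per_class builds per-class precision-recall curves by sorting predictions by confidence and accumulating TPs and FPs before handing each curve to compute_ap. A toy walk-through of just that accumulation step (the tp flags and label count are invented, the eps guard is dropped, and compute_ap itself is not part of this row, so the final AP integration is left out):

import numpy as np

tp = np.array([[1], [0], [1]], dtype=float)  # 3 predictions, already sorted by confidence, 1 IoU threshold
n_l = 2                                      # ground-truth labels for this class

fpc = (1 - tp).cumsum(0)       # cumulative false positives: [[0], [1], [1]]
tpc = tp.cumsum(0)             # cumulative true positives:  [[1], [1], [2]]
recall = tpc / n_l             # [[0.5], [0.5], [1.0]]
precision = tpc / (tpc + fpc)  # [[1.0], [0.5], [0.667]]
print(recall.ravel(), precision.ravel())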
12121
FILE = Path(__file__).resolve() ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative def save_one_txt(predn, save_conf, shape, file): # Save one txt result gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(file, 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') # def save_one_json(predn, jdict, path, class_map): def save_one_json(pred_hbbn, pred_polyn, jdict, path, class_map): """ Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236, "poly": [...]} Args: pred_hbbn (tensor): (n, [poly, conf, cls]) pred_polyn (tensor): (n, [xyxy, conf, cls]) """ image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(pred_hbbn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner for p, b in zip(pred_polyn.tolist(), box.tolist()): jdict.append({'image_id': image_id, 'category_id': class_map[int(p[-1]) + 1], # COCO's category_id start from 1, not 0 'bbox': [round(x, 1) for x in b], 'score': round(p[-2], 5), 'poly': [round(x, 1) for x in p[:8]], 'file_name': path.stem}) def process_batch(detections, labels, iouv): """ Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format. Arguments: detections (Array[N, 6]), x1, y1, x2, y2, conf, class labels (Array[M, 5]), class, x1, y1, x2, y2 Returns: correct (Array[N, 10]), for 10 IoU levels """ correct = torch.zeros(detections.shape[0], iouv.shape[0], dtype=torch.bool, device=iouv.device) iou = box_iou(labels[:, 1:], detections[:, :4]) x = torch.where((iou >= iouv[0]) & (labels[:, 0:1] == detections[:, 5])) # IoU above threshold and classes match if x[0].shape[0]: matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detection, iou] if x[0].shape[0] > 1: matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 1], return_index=True)[1]] # matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 0], return_index=True)[1]] matches = torch.Tensor(matches).to(iouv.device) correct[matches[:, 1].long()] = matches[:, 2:3] >= iouv return correct @torch.no_grad() def run(data, weights=None, # model.pt path(s) batch_size=32, # batch size imgsz=640, # inference size (pixels) conf_thres=0.01, # confidence threshold iou_thres=0.4, # NMS IoU threshold task='val', # train, val, test, speed or study device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu workers=8, # max dataloader workers (per RANK in DDP mode) single_cls=False, # treat as single-class dataset augment=False, # augmented inference verbose=False, # verbose output save_txt=False, # save results to *.txt save_hybrid=False, # save label+prediction hybrid results to *.txt save_conf=False, # save confidences in --save-txt labels save_json=False, # save a COCO-JSON results file project=ROOT / 'runs/val', # save to project/name name='exp', # save to project/name exist_ok=False, # existing project/name ok, do not increment half=True, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference model=None, dataloader=None, save_dir=Path(''), plots=True, callbacks=Callbacks(), compute_loss=None, ): # Initialize/load model and set device training = model is not None if training: # called by train.py device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model half &= device.type != 'cpu' # half precision only supported on CUDA model.half() if half else model.float() else: # called directly device = select_device(device, batch_size=batch_size) # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = DetectMultiBackend(weights, device=device, dnn=dnn) stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size half &= (pt or jit or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA if pt or jit: model.model.half() if half else model.model.float() elif engine: batch_size = model.batch_size else: half = False batch_size = 1 # export.py models default to batch-size 1 device = torch.device('cpu')
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Validate a trained YOLOv5 model accuracy on a custom dataset Usage: $ python path/to/val.py --data coco128.yaml --weights yolov5s.pt --img 640 """ FILE = Path(__file__).resolve() ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative def save_one_txt(predn, save_conf, shape, file): # Save one txt result gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(file, 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') # def save_one_json(predn, jdict, path, class_map): def save_one_json(pred_hbbn, pred_polyn, jdict, path, class_map): """ Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236, "poly": [...]} Args: pred_hbbn (tensor): (n, [poly, conf, cls]) pred_polyn (tensor): (n, [xyxy, conf, cls]) """ image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(pred_hbbn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner for p, b in zip(pred_polyn.tolist(), box.tolist()): jdict.append({'image_id': image_id, 'category_id': class_map[int(p[-1]) + 1], # COCO's category_id start from 1, not 0 'bbox': [round(x, 1) for x in b], 'score': round(p[-2], 5), 'poly': [round(x, 1) for x in p[:8]], 'file_name': path.stem}) def process_batch(detections, labels, iouv): """ Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format. Arguments: detections (Array[N, 6]), x1, y1, x2, y2, conf, class labels (Array[M, 5]), class, x1, y1, x2, y2 Returns: correct (Array[N, 10]), for 10 IoU levels """ correct = torch.zeros(detections.shape[0], iouv.shape[0], dtype=torch.bool, device=iouv.device) iou = box_iou(labels[:, 1:], detections[:, :4]) x = torch.where((iou >= iouv[0]) & (labels[:, 0:1] == detections[:, 5])) # IoU above threshold and classes match if x[0].shape[0]: matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detection, iou] if x[0].shape[0] > 1: matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 1], return_index=True)[1]] # matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 0], return_index=True)[1]] matches = torch.Tensor(matches).to(iouv.device) correct[matches[:, 1].long()] = matches[:, 2:3] >= iouv return correct @torch.no_grad() def run(data, weights=None, # model.pt path(s) batch_size=32, # batch size imgsz=640, # inference size (pixels) conf_thres=0.01, # confidence threshold iou_thres=0.4, # NMS IoU threshold task='val', # train, val, test, speed or study device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu workers=8, # max dataloader workers (per RANK in DDP mode) single_cls=False, # treat as single-class dataset augment=False, # augmented inference verbose=False, # verbose output save_txt=False, # save results to *.txt save_hybrid=False, # save label+prediction hybrid results to *.txt save_conf=False, # save confidences in --save-txt labels save_json=False, # save a COCO-JSON results file project=ROOT / 'runs/val', # save to project/name name='exp', # save to project/name exist_ok=False, # existing project/name ok, do not increment half=True, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference model=None, dataloader=None, save_dir=Path(''), plots=True, callbacks=Callbacks(), compute_loss=None, ): # Initialize/load model and set device training = model is not None if training: # called by train.py device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model half &= device.type != 'cpu' # half precision only supported on CUDA model.half() if half else model.float() else: # called directly device = select_device(device, batch_size=batch_size) # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = DetectMultiBackend(weights, device=device, dnn=dnn) stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size half &= (pt or jit or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA if pt or jit: model.model.half() if half else model.model.float() elif engine: batch_size = model.batch_size else: half = False batch_size = 1 # export.py models default to batch-size 1 device = torch.device('cpu')
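save_one_txt in the code above converts each xyxy prediction to xywh and divides by a gain vector built from the image shape so the saved label files are normalized to [0, 1]. A tiny standalone illustration of that gain trick; the shape and box values are made up, and xyxy2xywh is re-implemented here with the standard corner-to-center math (the repo imports it from utils.general, whose body is only listed by name above):

import torch

def xyxy2xywh(x):
    # standard corner -> center conversion: (x1, y1, x2, y2) -> (xc, yc, w, h)
    y = x.clone()
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2
    y[:, 2] = x[:, 2] - x[:, 0]
    y[:, 3] = x[:, 3] - x[:, 1]
    return y

shape = (480, 640)                                    # (height, width) of the original image
gn = torch.tensor(shape)[[1, 0, 1, 0]]                # -> (w, h, w, h) normalization gain
xyxy = torch.tensor([[64.0, 48.0, 320.0, 240.0]])     # one predicted box in pixels
xywh_norm = (xyxy2xywh(xyxy) / gn).view(-1).tolist()  # YOLO label format, values in [0, 1]
print([round(v, 3) for v in xywh_norm])               # [0.3, 0.3, 0.4, 0.4]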
LOGGER.info(f'Forcing --batch-size 1 square inference shape(1,3,{imgsz},{imgsz}) for non-PyTorch backends')
5
2023-10-31 06:06:41+00:00
16k
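The process_batch function in the val.py code above marks a detection as correct at every IoU threshold it clears, but only when its class matches the ground truth. Below is a minimal sketch of that matching on a hand-made batch; box_iou is imported from utils.general in the real file and its body is not shown in this row, so the version here is an assumed standard pairwise-IoU implementation, and the duplicate-match filtering (the np.unique steps) is skipped for brevity:

import torch

def box_iou(box1, box2):
    # assumed standard pairwise IoU between (N, 4) and (M, 4) xyxy boxes -> (N, M)
    area1 = (box1[:, 2] - box1[:, 0]) * (box1[:, 3] - box1[:, 1])
    area2 = (box2[:, 2] - box2[:, 0]) * (box2[:, 3] - box2[:, 1])
    lt = torch.max(box1[:, None, :2], box2[None, :, :2])
    rb = torch.min(box1[:, None, 2:], box2[None, :, 2:])
    wh = (rb - lt).clamp(min=0)
    inter = wh[..., 0] * wh[..., 1]
    return inter / (area1[:, None] + area2[None, :] - inter)

labels = torch.tensor([[0.0, 10, 10, 50, 50]])           # one ground truth: [class, x1, y1, x2, y2]
detections = torch.tensor([[12, 12, 50, 50, 0.9, 0.0],   # overlaps the gt, correct class
                           [12, 12, 50, 50, 0.8, 1.0]])  # same box, wrong class
iouv = torch.linspace(0.5, 0.95, 10)                     # the usual 10 IoU thresholds

iou = box_iou(labels[:, 1:], detections[:, :4])                   # (1, 2), ~0.90 for both
match = (iou >= iouv[0]) & (labels[:, 0:1] == detections[:, 5])   # IoU ok AND class match
correct = torch.zeros(detections.shape[0], iouv.shape[0], dtype=torch.bool)
gt_idx, det_idx = torch.where(match)
correct[det_idx] = iou[gt_idx, det_idx][:, None] >= iouv          # per-threshold correctness
print(correct)  # row 0: True for thresholds up to ~0.9; row 1: all False (class mismatch)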
DataCanvasIO/LMS
lms/runtime/prune/llm_pruner/LLMPruner/peft/peft_model.py
[ { "identifier": "LoraModel", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/tuners/lora.py", "snippet": "class LoraModel(torch.nn.Module):\n \"\"\"\n Creates Low Rank Adapter (Lora) model from a pretrained transformers model.\n\n Args:\n model ([`~transformers.PreTrainedModel`]): The model to be adapted.\n config ([`LoraConfig`]): The configuration of the Lora model.\n\n Returns:\n `torch.nn.Module`: The Lora model.\n\n Example:\n\n ```py\n >>> from transformers import AutoModelForSeq2SeqLM, LoraConfig\n >>> from peft import LoraModel, LoraConfig\n\n >>> config = LoraConfig(\n ... peft_type=\"LORA\",\n ... task_type=\"SEQ_2_SEQ_LM\",\n ... r=8,\n ... lora_alpha=32,\n ... target_modules=[\"q\", \"v\"],\n ... lora_dropout=0.01,\n ... )\n\n >>> model = AutoModelForSeq2SeqLM.from_pretrained(\"t5-base\")\n >>> lora_model = LoraModel(config, model)\n ```\n\n **Attributes**:\n - **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted.\n - **peft_config** ([`LoraConfig`]): The configuration of the Lora model.\n \"\"\"\n\n def __init__(self, model, config, adapter_name):\n super().__init__()\n self.model = model\n self.forward = self.model.forward\n self.peft_config = config\n self.add_adapter(adapter_name, self.peft_config[adapter_name])\n\n def add_adapter(self, adapter_name, config=None):\n if config is not None:\n model_config = self.model.config.to_dict() if hasattr(self.model.config, \"to_dict\") else self.model.config\n config = self._prepare_lora_config(config, model_config)\n self.peft_config[adapter_name] = config\n self._find_and_replace(adapter_name)\n if len(self.peft_config) > 1 and self.peft_config[adapter_name].bias != \"none\":\n raise ValueError(\n \"LoraModel supports only 1 adapter with bias. When using multiple adapters, set bias to 'none' for all adapters.\"\n )\n mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias)\n if self.peft_config[adapter_name].inference_mode:\n _freeze_adapter(self.model, adapter_name)\n\n def _find_and_replace(self, adapter_name):\n lora_config = self.peft_config[adapter_name]\n loaded_in_8bit = getattr(self.model, \"is_loaded_in_8bit\", False)\n if loaded_in_8bit and not is_bnb_available():\n raise ImportError(\n \"To use Lora with 8-bit quantization, please install the `bitsandbytes` package. 
\"\n \"You can install it with `pip install bitsandbytes`.\"\n )\n is_target_modules_in_base_model = False\n kwargs = {\n \"r\": lora_config.r,\n \"lora_alpha\": lora_config.lora_alpha,\n \"lora_dropout\": lora_config.lora_dropout,\n \"fan_in_fan_out\": lora_config.fan_in_fan_out,\n \"init_lora_weights\": lora_config.init_lora_weights,\n }\n key_list = [key for key, _ in self.model.named_modules()]\n for key in key_list:\n if isinstance(lora_config.target_modules, str):\n target_module_found = re.fullmatch(lora_config.target_modules, key)\n else:\n target_module_found = any(key.endswith(target_key) for target_key in lora_config.target_modules)\n if target_module_found:\n if not is_target_modules_in_base_model:\n is_target_modules_in_base_model = True\n parent, target, target_name = _get_submodules(self.model, key)\n bias = target.bias is not None\n if isinstance(target, LoraLayer):\n target.update_layer(\n adapter_name,\n lora_config.r,\n lora_config.lora_alpha,\n lora_config.lora_dropout,\n lora_config.init_lora_weights,\n )\n else:\n if loaded_in_8bit and isinstance(target, bnb.nn.Linear8bitLt):\n eightbit_kwargs = kwargs.copy()\n eightbit_kwargs.update(\n {\n \"has_fp16_weights\": target.state.has_fp16_weights,\n \"memory_efficient_backward\": target.state.memory_efficient_backward,\n \"threshold\": target.state.threshold,\n \"index\": target.index,\n }\n )\n new_module = Linear8bitLt(\n adapter_name, target.in_features, target.out_features, bias=bias, **eightbit_kwargs\n )\n else:\n if isinstance(target, torch.nn.Linear):\n in_features, out_features = target.in_features, target.out_features\n if kwargs[\"fan_in_fan_out\"]:\n warnings.warn(\n \"fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. \"\n \"Setting fan_in_fan_out to False.\"\n )\n kwargs[\"fan_in_fan_out\"] = lora_config.fan_in_fan_out = False\n elif isinstance(target, Conv1D):\n in_features, out_features = (\n target.weight.ds_shape if hasattr(target.weight, \"ds_shape\") else target.weight.shape\n )\n if not kwargs[\"fan_in_fan_out\"]:\n warnings.warn(\n \"fan_in_fan_out is set to False but the target module is `Conv1D`. \"\n \"Setting fan_in_fan_out to True.\"\n )\n kwargs[\"fan_in_fan_out\"] = lora_config.fan_in_fan_out = True\n else:\n raise ValueError(\n f\"Target module {target} is not supported. \"\n f\"Currently, only `torch.nn.Linear` and `Conv1D` are supported.\"\n )\n new_module = Linear(adapter_name, in_features, out_features, bias=bias, **kwargs)\n\n self._replace_module(parent, target_name, new_module, target)\n if not is_target_modules_in_base_model:\n raise ValueError(\n f\"Target modules {lora_config.target_modules} not found in the base model. 
\"\n f\"Please check the target modules and try again.\"\n )\n\n def _replace_module(self, parent_module, child_name, new_module, old_module):\n setattr(parent_module, child_name, new_module)\n new_module.weight = old_module.weight\n if old_module.bias is not None:\n new_module.bias = old_module.bias\n if getattr(old_module, \"state\", None) is not None:\n new_module.state = old_module.state\n new_module.to(old_module.weight.device)\n\n # dispatch to correct device\n for name, module in new_module.named_modules():\n if \"lora_\" in name:\n module.to(old_module.weight.device)\n\n def __getattr__(self, name: str):\n \"\"\"Forward missing attributes to the wrapped module.\"\"\"\n try:\n return super().__getattr__(name) # defer to nn.Module's logic\n except AttributeError:\n return getattr(self.model, name)\n\n def get_peft_config_as_dict(self, inference: bool = False):\n config_dict = {}\n for key, value in self.peft_config.items():\n config = {k: v.value if isinstance(v, Enum) else v for k, v in asdict(value).items()}\n if inference:\n config[\"inference_mode\"] = True\n config_dict[key] = config\n return config\n\n def _set_adapter_layers(self, enabled=True):\n for module in self.model.modules():\n if isinstance(module, LoraLayer):\n module.disable_adapters = False if enabled else True\n\n def enable_adapter_layers(self):\n self._set_adapter_layers(enabled=True)\n\n def disable_adapter_layers(self):\n self._set_adapter_layers(enabled=False)\n\n def set_adapter(self, adapter_name):\n for module in self.model.modules():\n if isinstance(module, LoraLayer):\n if module.merged:\n warnings.warn(\"Adapter cannot be set when the model is merged. Unmerging the model first.\")\n module.unmerge()\n module.active_adapter = adapter_name\n\n def merge_adapter(self):\n for module in self.model.modules():\n if isinstance(module, LoraLayer):\n module.merge()\n\n def unmerge_adapter(self):\n for module in self.model.modules():\n if isinstance(module, LoraLayer):\n module.unmerge()\n\n @staticmethod\n def _prepare_lora_config(peft_config, model_config):\n if peft_config.target_modules is None:\n if model_config[\"model_type\"] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING:\n raise ValueError(\"Please specify `target_modules` in `peft_config`\")\n peft_config.target_modules = TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config[\"model_type\"]]\n if peft_config.inference_mode:\n peft_config.merge_weights = True\n return peft_config\n\n def merge_and_unload(self):\n r\"\"\"\n This method merges the LoRa layers into the base model. 
This is needed if someone wants to use the base model\n as a standalone model.\n \"\"\"\n if getattr(self.config, \"model_type\", None) == \"gpt2\":\n raise ValueError(\"GPT2 models are not supported for merging LORA layers\")\n\n if getattr(self.model, \"is_loaded_in_8bit\", False):\n raise ValueError(\"Cannot merge LORA layers when the model is loaded in 8-bit mode\")\n\n key_list = [key for key, _ in self.model.named_modules() if \"lora\" not in key]\n for key in key_list:\n try:\n parent, target, target_name = _get_submodules(self.model, key)\n except AttributeError:\n continue\n if isinstance(target, LoraLayer):\n bias = target.bias is not None\n new_module = torch.nn.Linear(target.in_features, target.out_features, bias=bias)\n target.merge()\n self._replace_module(parent, target_name, new_module, target)\n\n # save any additional trainable modules part of `modules_to_save`\n if isinstance(target, ModulesToSaveWrapper):\n setattr(parent, target_name, target.modules_to_save[target.active_adapter])\n\n return self.model\n\n def add_weighted_adapter(self, adapters, weights, adapter_name):\n if len({self.peft_config[adapter].r for adapter in adapters}) != 1:\n raise ValueError(\"All adapters must have the same r value\")\n self.peft_config[adapter_name] = self.peft_config[adapters[0]]\n self.peft_config[adapter_name].lora_alpha = self.peft_config[adapters[0]].r\n self._find_and_replace(adapter_name)\n mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias)\n _freeze_adapter(self.model, adapter_name)\n key_list = [key for key, _ in self.model.named_modules() if \"lora\" not in key]\n for key in key_list:\n _, target, _ = _get_submodules(self.model, key)\n if isinstance(target, LoraLayer):\n target.lora_A[adapter_name].weight.data = target.lora_A[adapter_name].weight.data * 0.0\n target.lora_B[adapter_name].weight.data = target.lora_B[adapter_name].weight.data * 0.0\n for adapter, weight in zip(adapters, weights):\n if adapter not in target.lora_A:\n continue\n target.lora_A[adapter_name].weight.data += (\n target.lora_A[adapter].weight.data * weight * target.scaling[adapter]\n )\n target.lora_B[adapter_name].weight.data += target.lora_B[adapter].weight.data * weight" }, { "identifier": "AdaLoraModel", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/tuners/adalora.py", "snippet": "class AdaLoraModel(LoraModel):\n \"\"\"\n Creates AdaLoRA (Adaptive LoRA) model from a pretrained transformers model. 
Paper:\n https://openreview.net/pdf?id=lq62uWRJjiY\n\n Args:\n model ([`transformers.PreTrainedModel`]): The model to be adapted.\n config ([`AdaLoraConfig`]): The configuration of the AdaLora model.\n\n Returns:\n `torch.nn.Module`: The AdaLora model.\n\n Example::\n\n >>> from transformers import AutoModelForSeq2SeqLM, LoraConfig >>> from peft import AdaLoraModel, AdaLoraConfig\n >>> config = AdaLoraConfig(\n peft_type=\"ADALORA\", task_type=\"SEQ_2_SEQ_LM\", r=8, lora_alpha=32, target_modules=[\"q\", \"v\"],\n lora_dropout=0.01,\n )\n >>> model = AutoModelForSeq2SeqLM.from_pretrained(\"t5-base\") >>> model = AdaLoraModel(config, model)\n\n **Attributes**:\n - **model** ([`transformers.PreTrainedModel`]) -- The model to be adapted.\n - **peft_config** ([`AdaLoraConfig`]): The configuration of the AdaLora model.\n \"\"\"\n\n def __init__(self, model, config, adapter_name):\n nn.Module.__init__(self)\n self.model = model\n self.peft_config = config\n self.add_adapter(adapter_name, self.peft_config[adapter_name])\n\n def add_adapter(self, adapter_name, config=None):\n if config is not None:\n model_config = self.model.config.to_dict() if hasattr(self.model.config, \"to_dict\") else self.model.config\n config = self._prepare_adalora_config(config, model_config)\n self.peft_config[adapter_name] = config\n self._find_and_replace(adapter_name)\n if len(self.peft_config) > 1 and self.peft_config[adapter_name].bias != \"none\":\n raise ValueError(\n \"AdaLoraModel supports only 1 adapter with bias. When using multiple adapters, set bias to 'none' for all adapters.\"\n )\n traininable_mode_counter = 0\n for config in self.peft_config.values():\n if not config.inference_mode:\n traininable_mode_counter += 1\n\n if traininable_mode_counter > 1:\n raise ValueError(\n \"AdaLoraModel supports only 1 trainable adapter. \"\n \"When using multiple adapters, set inference_mode to True for all adapters except the one you want to train.\"\n )\n\n mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias)\n if self.peft_config[adapter_name].inference_mode:\n _freeze_adapter(self.model, adapter_name)\n else:\n self.trainable_adapter_name = adapter_name\n self.rankallocator = RankAllocator(self.model, self.peft_config[adapter_name], self.trainable_adapter_name)\n\n def _find_and_replace(self, adapter_name):\n lora_config = self.peft_config[adapter_name]\n loaded_in_8bit = getattr(self.model, \"is_loaded_in_8bit\", False)\n if loaded_in_8bit and not is_bnb_available():\n raise ImportError(\n \"To use Lora with 8-bit quantization, please install the `bitsandbytes` package. 
\"\n \"You can install it with `pip install bitsandbytes`.\"\n )\n is_target_modules_in_base_model = False\n kwargs = {\n \"r\": lora_config.init_r,\n \"lora_alpha\": lora_config.lora_alpha,\n \"lora_dropout\": lora_config.lora_dropout,\n \"fan_in_fan_out\": lora_config.fan_in_fan_out,\n \"init_lora_weights\": lora_config.init_lora_weights,\n }\n key_list = [key for key, _ in self.model.named_modules()]\n for key in key_list:\n if isinstance(lora_config.target_modules, str):\n target_module_found = re.fullmatch(lora_config.target_modules, key)\n else:\n target_module_found = any(key.endswith(target_key) for target_key in lora_config.target_modules)\n if target_module_found:\n if not is_target_modules_in_base_model:\n is_target_modules_in_base_model = True\n parent, target, target_name = _get_submodules(self.model, key)\n bias = target.bias is not None\n if isinstance(target, LoraLayer):\n target.update_layer(\n adapter_name,\n lora_config.init_r,\n lora_config.lora_alpha,\n lora_config.lora_dropout,\n lora_config.init_lora_weights,\n )\n else:\n if loaded_in_8bit and isinstance(target, bnb.nn.Linear8bitLt):\n kwargs.update(\n {\n \"has_fp16_weights\": target.state.has_fp16_weights,\n \"memory_efficient_backward\": target.state.memory_efficient_backward,\n \"threshold\": target.state.threshold,\n \"index\": target.index,\n }\n )\n new_module = SVDLinear8bitLt(\n adapter_name, target.in_features, target.out_features, bias=bias, **kwargs\n )\n else:\n if isinstance(target, torch.nn.Linear):\n in_features, out_features = target.in_features, target.out_features\n if kwargs[\"fan_in_fan_out\"]:\n warnings.warn(\n \"fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. \"\n \"Setting fan_in_fan_out to False.\"\n )\n kwargs[\"fan_in_fan_out\"] = lora_config.fan_in_fan_out = False\n elif isinstance(target, Conv1D):\n in_features, out_features = (\n target.weight.ds_shape if hasattr(target.weight, \"ds_shape\") else target.weight.shape\n )\n if not kwargs[\"fan_in_fan_out\"]:\n warnings.warn(\n \"fan_in_fan_out is set to False but the target module is `Conv1D`. \"\n \"Setting fan_in_fan_out to True.\"\n )\n kwargs[\"fan_in_fan_out\"] = lora_config.fan_in_fan_out = True\n else:\n raise ValueError(\n f\"Target module {target} is not supported. \"\n f\"Currently, only `torch.nn.Linear` and `Conv1D` are supported.\"\n )\n new_module = SVDLinear(adapter_name, in_features, out_features, bias=bias, **kwargs)\n\n self._replace_module(parent, target_name, new_module, target)\n if not is_target_modules_in_base_model:\n raise ValueError(\n f\"Target modules {lora_config.target_modules} not found in the base model. 
\"\n f\"Please check the target modules and try again.\"\n )\n\n def __getattr__(self, name: str):\n \"\"\"Forward missing attributes to the wrapped module.\"\"\"\n try:\n return super().__getattr__(name) # defer to nn.Module's logic\n except AttributeError:\n return getattr(self.model, name)\n\n def forward(self, *args, **kwargs):\n outputs = self.model.forward(*args, **kwargs)\n\n # Calculate the orthogonal regularization\n orth_reg_weight = self.peft_config[self.trainable_adapter_name].orth_reg_weight\n assert orth_reg_weight > 0\n\n if hasattr(outputs, \"loss\"):\n regu_loss = 0\n num_param = 0\n for n, p in self.model.named_parameters():\n if (\"lora_A\" in n or \"lora_B\" in n) and self.trainable_adapter_name in n:\n para_cov = p @ p.T if \"lora_A\" in n else p.T @ p\n I = torch.eye(*para_cov.size(), out=torch.empty_like(para_cov))\n I.requires_grad = False\n num_param += 1\n regu_loss += torch.norm(para_cov - I, p=\"fro\")\n regu_loss = regu_loss / num_param\n outputs.loss += orth_reg_weight * regu_loss\n return outputs\n\n def resize_modules_by_rank_pattern(self, rank_pattern, adapter_name):\n lora_config = self.peft_config[adapter_name]\n for name, rank_idx in rank_pattern.items():\n if isinstance(rank_idx, list):\n rank = sum(rank_idx)\n elif isinstance(rank_idx, torch.Tensor):\n rank_idx = rank_idx.view(-1)\n rank = rank_idx.sum().item()\n else:\n raise ValueError(\"Unexcepted type of rank_idx\")\n key = \".\".join(name.split(\".\")[0:-2]) if adapter_name in name else \".\".join(name.split(\".\")[0:-1])\n _, target, _ = _get_submodules(self.model, key)\n lora_E_weights = target.lora_E[adapter_name][rank_idx]\n lora_A_weights = target.lora_A[adapter_name][rank_idx]\n lora_B_weights = target.lora_B[adapter_name][:, rank_idx]\n ranknum = target.ranknum[adapter_name]\n target.update_layer(\n adapter_name,\n rank,\n lora_config.lora_alpha,\n lora_config.lora_dropout,\n lora_config.init_lora_weights,\n )\n with torch.no_grad():\n if rank > 0:\n target.lora_E[adapter_name].copy_(lora_E_weights)\n target.lora_A[adapter_name].copy_(lora_A_weights)\n target.lora_B[adapter_name].copy_(lora_B_weights)\n # The scaling is exactly as the previous\n target.ranknum[adapter_name].copy_(ranknum)\n\n def resize_state_dict_by_rank_pattern(self, rank_pattern, state_dict, adapter_name):\n for name, rank_idx in rank_pattern.items():\n rank = sum(rank_idx)\n prefix = \".\".join(name.split(\".\")[0:-2]) if adapter_name in name else \".\".join(name.split(\".\")[0:-1])\n for layer in [\"lora_E\", \"lora_A\", \"lora_B\"]:\n key = f\"base_model.model.{prefix}.{layer}.{adapter_name}\"\n if layer != \"lora_B\":\n state_dict[key] = (\n state_dict[key][rank_idx] if rank != state_dict[key].shape[0] else state_dict[key]\n )\n else:\n state_dict[key] = (\n state_dict[key][:, rank_idx] if rank != state_dict[key].shape[1] else state_dict[key]\n )\n return state_dict\n\n def update_and_allocate(self, global_step):\n lora_config = self.peft_config[self.trainable_adapter_name]\n # Update the importance score and allocate the budget\n if global_step < lora_config.total_step - lora_config.tfinal:\n _, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step)\n if rank_pattern:\n lora_config.rank_pattern = rank_pattern\n # Finalize the budget allocation\n elif global_step == lora_config.total_step - lora_config.tfinal:\n _, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step, force_mask=True)\n # for some reason, this freezes the trainable parameters and nothing gets updates\n 
# self.resize_modules_by_rank_pattern(rank_pattern, self.trainable_adapter_name)\n lora_config.rank_pattern = rank_pattern\n self.rankallocator.reset_ipt()\n # Currently using inefficient way to mask the unimportant weights using the rank pattern\n # due to problem mentioned above\n elif global_step > lora_config.total_step - lora_config.tfinal:\n self.rankallocator.mask_using_rank_pattern(self.model, lora_config.rank_pattern)\n # Pass the function and do forward propagation\n else:\n return None\n\n @staticmethod\n def _prepare_adalora_config(peft_config, model_config):\n if peft_config.target_modules is None:\n if model_config[\"model_type\"] not in TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING:\n raise ValueError(\"Please specify `target_modules` in `peft_config`\")\n peft_config.target_modules = TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING[\n model_config[\"model_type\"]\n ]\n if peft_config.inference_mode:\n peft_config.merge_weights = True\n return peft_config" }, { "identifier": "PromptEncoder", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/tuners/p_tuning.py", "snippet": "class PromptEncoder(torch.nn.Module):\n \"\"\"\n The prompt encoder network that is used to generate the virtual token embeddings for p-tuning.\n\n Args:\n config ([`PromptEncoderConfig`]): The configuration of the prompt encoder.\n\n Example:\n\n ```py\n >>> from peft import PromptEncoder, PromptEncoderConfig\n\n >>> config = PromptEncoderConfig(\n ... peft_type=\"P_TUNING\",\n ... task_type=\"SEQ_2_SEQ_LM\",\n ... num_virtual_tokens=20,\n ... token_dim=768,\n ... num_transformer_submodules=1,\n ... num_attention_heads=12,\n ... num_layers=12,\n ... encoder_reparameterization_type=\"MLP\",\n ... encoder_hidden_size=768,\n ... )\n\n >>> prompt_encoder = PromptEncoder(config)\n ```\n\n **Attributes**:\n - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prompt encoder.\n - **mlp_head** (`torch.nn.Sequential`) -- The MLP head of the prompt encoder if `inference_mode=False`.\n - **lstm_head** (`torch.nn.LSTM`) -- The LSTM head of the prompt encoder if `inference_mode=False` and\n `encoder_reparameterization_type=\"LSTM\"`.\n - **token_dim** (`int`) -- The hidden embedding dimension of the base transformer model.\n - **input_size** (`int`) -- The input size of the prompt encoder.\n - **output_size** (`int`) -- The output size of the prompt encoder.\n - **hidden_size** (`int`) -- The hidden size of the prompt encoder.\n - **total_virtual_tokens** (`int`): The total number of virtual tokens of the\n prompt encoder.\n - **encoder_type** (Union[[`PromptEncoderReparameterizationType`], `str`]): The encoder type of the prompt\n encoder.\n\n\n Input shape: (`batch_size`, `total_virtual_tokens`)\n\n Output shape: (`batch_size`, `total_virtual_tokens`, `token_dim`)\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.token_dim = config.token_dim\n self.input_size = self.token_dim\n self.output_size = self.token_dim\n self.hidden_size = config.encoder_hidden_size\n self.total_virtual_tokens = config.num_virtual_tokens * config.num_transformer_submodules\n self.encoder_type = config.encoder_reparameterization_type\n\n # embedding\n self.embedding = torch.nn.Embedding(self.total_virtual_tokens, self.token_dim)\n if not config.inference_mode:\n if self.encoder_type == PromptEncoderReparameterizationType.LSTM:\n lstm_dropout = config.encoder_dropout\n num_layers = config.encoder_num_layers\n # LSTM\n self.lstm_head = torch.nn.LSTM(\n input_size=self.input_size,\n 
hidden_size=self.hidden_size,\n num_layers=num_layers,\n dropout=lstm_dropout,\n bidirectional=True,\n batch_first=True,\n )\n\n self.mlp_head = torch.nn.Sequential(\n torch.nn.Linear(self.hidden_size * 2, self.hidden_size * 2),\n torch.nn.ReLU(),\n torch.nn.Linear(self.hidden_size * 2, self.output_size),\n )\n\n elif self.encoder_type == PromptEncoderReparameterizationType.MLP:\n warnings.warn(\n f\"for {self.encoder_type}, the `encoder_num_layers` is ignored. Exactly 2 MLP layers are used.\"\n )\n layers = [\n torch.nn.Linear(self.input_size, self.hidden_size),\n torch.nn.ReLU(),\n torch.nn.Linear(self.hidden_size, self.hidden_size),\n torch.nn.ReLU(),\n torch.nn.Linear(self.hidden_size, self.output_size),\n ]\n self.mlp_head = torch.nn.Sequential(*layers)\n\n else:\n raise ValueError(\"Prompt encoder type not recognized. Please use one of MLP (recommended) or LSTM.\")\n\n def forward(self, indices):\n input_embeds = self.embedding(indices)\n if self.encoder_type == PromptEncoderReparameterizationType.LSTM:\n output_embeds = self.mlp_head(self.lstm_head(input_embeds)[0])\n elif self.encoder_type == PromptEncoderReparameterizationType.MLP:\n output_embeds = self.mlp_head(input_embeds)\n else:\n raise ValueError(\"Prompt encoder type not recognized. Please use one of MLP (recommended) or LSTM.\")\n\n return output_embeds" }, { "identifier": "PrefixEncoder", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/tuners/prefix_tuning.py", "snippet": "class PrefixEncoder(torch.nn.Module):\n r\"\"\"\n The `torch.nn` model to encode the prefix.\n\n Args:\n config ([`PrefixTuningConfig`]): The configuration of the prefix encoder.\n\n Example:\n\n ```py\n >>> from peft import PrefixEncoder, PrefixTuningConfig\n\n >>> config = PrefixTuningConfig(\n ... peft_type=\"PREFIX_TUNING\",\n ... task_type=\"SEQ_2_SEQ_LM\",\n ... num_virtual_tokens=20,\n ... token_dim=768,\n ... num_transformer_submodules=1,\n ... num_attention_heads=12,\n ... num_layers=12,\n ... encoder_hidden_size=768,\n ... 
)\n >>> prefix_encoder = PrefixEncoder(config)\n ```\n\n **Attributes**:\n - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prefix encoder.\n - **transform** (`torch.nn.Sequential`) -- The two-layer MLP to transform the prefix embeddings if\n `prefix_projection` is `True`.\n - **prefix_projection** (`bool`) -- Whether to project the prefix embeddings.\n\n Input shape: (`batch_size`, `num_virtual_tokens`)\n\n Output shape: (`batch_size`, `num_virtual_tokens`, `2*layers*hidden`)\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.prefix_projection = config.prefix_projection\n token_dim = config.token_dim\n num_layers = config.num_layers\n encoder_hidden_size = config.encoder_hidden_size\n num_virtual_tokens = config.num_virtual_tokens\n if self.prefix_projection and not config.inference_mode:\n # Use a two-layer MLP to encode the prefix\n self.embedding = torch.nn.Embedding(num_virtual_tokens, token_dim)\n self.transform = torch.nn.Sequential(\n torch.nn.Linear(token_dim, encoder_hidden_size),\n torch.nn.Tanh(),\n torch.nn.Linear(encoder_hidden_size, num_layers * 2 * token_dim),\n )\n else:\n self.embedding = torch.nn.Embedding(num_virtual_tokens, num_layers * 2 * token_dim)\n\n def forward(self, prefix: torch.Tensor):\n if self.prefix_projection:\n prefix_tokens = self.embedding(prefix)\n past_key_values = self.transform(prefix_tokens)\n else:\n past_key_values = self.embedding(prefix)\n return past_key_values" }, { "identifier": "PromptEmbedding", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/tuners/prompt_tuning.py", "snippet": "class PromptEmbedding(torch.nn.Module):\n \"\"\"\n The model to encode virtual tokens into prompt embeddings.\n\n Args:\n config ([`PromptTuningConfig`]): The configuration of the prompt embedding.\n word_embeddings (`torch.nn.Module`): The word embeddings of the base transformer model.\n\n **Attributes**:\n - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prompt embedding.\n\n Example:\n\n ```py\n >>> from peft import PromptEmbedding, PromptTuningConfig\n\n >>> config = PromptTuningConfig(\n ... peft_type=\"PROMPT_TUNING\",\n ... task_type=\"SEQ_2_SEQ_LM\",\n ... num_virtual_tokens=20,\n ... token_dim=768,\n ... num_transformer_submodules=1,\n ... num_attention_heads=12,\n ... num_layers=12,\n ... prompt_tuning_init=\"TEXT\",\n ... prompt_tuning_init_text=\"Predict if sentiment of this review is positive, negative or neutral\",\n ... tokenizer_name_or_path=\"t5-base\",\n ... 
)\n\n >>> # t5_model.shared is the word embeddings of the base model\n >>> prompt_embedding = PromptEmbedding(config, t5_model.shared)\n ```\n\n Input Shape: (`batch_size`, `total_virtual_tokens`)\n\n Output Shape: (`batch_size`, `total_virtual_tokens`, `token_dim`)\n \"\"\"\n\n def __init__(self, config, word_embeddings):\n super().__init__()\n\n total_virtual_tokens = config.num_virtual_tokens * config.num_transformer_submodules\n self.embedding = torch.nn.Embedding(total_virtual_tokens, config.token_dim)\n if config.prompt_tuning_init == PromptTuningInit.TEXT:\n from transformers import AutoTokenizer\n\n tokenizer = AutoTokenizer.from_pretrained(config.tokenizer_name_or_path)\n init_text = config.prompt_tuning_init_text\n init_token_ids = tokenizer(init_text)[\"input_ids\"]\n # Trim or iterate until num_text_tokens matches total_virtual_tokens\n num_text_tokens = len(init_token_ids)\n if num_text_tokens > total_virtual_tokens:\n init_token_ids = init_token_ids[:total_virtual_tokens]\n elif num_text_tokens < total_virtual_tokens:\n num_reps = math.ceil(total_virtual_tokens / num_text_tokens)\n init_token_ids = init_token_ids * num_reps\n init_token_ids = init_token_ids[:total_virtual_tokens]\n\n word_embedding_weights = word_embeddings(torch.LongTensor(init_token_ids)).detach().clone()\n word_embedding_weights = word_embedding_weights.to(torch.float32)\n self.embedding.weight = torch.nn.Parameter(word_embedding_weights)\n\n def forward(self, indices):\n # Just get embeddings\n prompt_embeddings = self.embedding(indices)\n return prompt_embeddings" }, { "identifier": "PeftConfig", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/utils/config.py", "snippet": "class PeftConfig(PeftConfigMixin):\n \"\"\"\n This is the base configuration class to store the configuration of a [`PeftModel`].\n\n Args:\n peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use.\n task_type (Union[[`~peft.utils.config.TaskType`], `str`]): The type of task to perform.\n inference_mode (`bool`, defaults to `False`): Whether to use the Peft model in inference mode.\n \"\"\"\n\n base_model_name_or_path: str = field(default=None, metadata={\"help\": \"The name of the base model to use.\"})\n peft_type: Union[str, PeftType] = field(default=None, metadata={\"help\": \"Peft type\"})\n task_type: Union[str, TaskType] = field(default=None, metadata={\"help\": \"Task type\"})\n inference_mode: bool = field(default=False, metadata={\"help\": \"Whether to use inference mode\"})" }, { "identifier": "PeftType", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/utils/config.py", "snippet": "class PeftType(str, enum.Enum):\n PROMPT_TUNING = \"PROMPT_TUNING\"\n P_TUNING = \"P_TUNING\"\n PREFIX_TUNING = \"PREFIX_TUNING\"\n LORA = \"LORA\"\n ADALORA = \"ADALORA\"" }, { "identifier": "PromptLearningConfig", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/utils/config.py", "snippet": "class PromptLearningConfig(PeftConfig):\n \"\"\"\n This is the base configuration class to store the configuration of [`PrefixTuning`], [`PromptEncoder`], or\n [`PromptTuning`].\n\n Args:\n num_virtual_tokens (`int`): The number of virtual tokens to use.\n token_dim (`int`): The hidden embedding dimension of the base transformer model.\n num_transformer_submodules (`int`): The number of transformer submodules in the base transformer model.\n num_attention_heads (`int`): The number of attention heads in the base transformer model.\n num_layers (`int`): The number of layers in the base transformer model.\n 
\"\"\"\n\n num_virtual_tokens: int = field(default=None, metadata={\"help\": \"Number of virtual tokens\"})\n token_dim: int = field(\n default=None, metadata={\"help\": \"The hidden embedding dimension of the base transformer model\"}\n )\n num_transformer_submodules: Optional[int] = field(\n default=None, metadata={\"help\": \"Number of transformer submodules\"}\n )\n num_attention_heads: Optional[int] = field(default=None, metadata={\"help\": \"Number of attention heads\"})\n num_layers: Optional[int] = field(default=None, metadata={\"help\": \"Number of transformer layers\"})" }, { "identifier": "TaskType", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/utils/config.py", "snippet": "class TaskType(str, enum.Enum):\n SEQ_CLS = \"SEQ_CLS\"\n SEQ_2_SEQ_LM = \"SEQ_2_SEQ_LM\"\n CAUSAL_LM = \"CAUSAL_LM\"\n TOKEN_CLS = \"TOKEN_CLS\"" }, { "identifier": "TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/utils/other.py", "snippet": "TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING = {\n \"bloom\": bloom_model_postprocess_past_key_value,\n}" }, { "identifier": "WEIGHTS_NAME", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/utils/other.py", "snippet": "WEIGHTS_NAME = \"adapter_model.bin\"" }, { "identifier": "_set_trainable", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/utils/other.py", "snippet": "def _set_trainable(model, adapter_name):\n key_list = [key for key, _ in model.named_modules()]\n for key in key_list:\n target_module_found = any(key.endswith(target_key) for target_key in model.modules_to_save)\n if target_module_found:\n parent, target, target_name = _get_submodules(model, key)\n if isinstance(target, ModulesToSaveWrapper):\n target.update(adapter_name)\n else:\n for param in target.parameters():\n param.requires_grad = True\n setattr(parent, target_name, ModulesToSaveWrapper(target, adapter_name))" }, { "identifier": "shift_tokens_right", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/utils/other.py", "snippet": "def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):\n \"\"\"\n Shift input ids one token to the right.\n\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): input ids\n pad_token_id (`int`): The id of the `padding` token.\n decoder_start_token_id (`int`): The id of the `start` token.\n \"\"\"\n shifted_input_ids = input_ids.new_zeros(input_ids.shape)\n shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()\n shifted_input_ids[:, 0] = decoder_start_token_id\n\n if pad_token_id is None:\n raise ValueError(\"self.model.config.pad_token_id has to be defined.\")\n # replace possible -100 values in labels by `pad_token_id`\n shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)\n\n return shifted_input_ids" }, { "identifier": "_set_adapter", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/utils/other.py", "snippet": "def _set_adapter(model, adapter_name):\n for module in model.modules():\n if isinstance(module, ModulesToSaveWrapper):\n module.active_adapter = adapter_name" }, { "identifier": "get_peft_model_state_dict", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/utils/save_and_load.py", "snippet": "def get_peft_model_state_dict(model, state_dict=None, adapter_name=\"default\"):\n \"\"\"\n Get the state dict of the Peft model.\n\n Args:\n model ([`PeftModel`]): The Peft model. 
When using torch.nn.DistributedDataParallel, DeepSpeed or FSDP,\n the model should be the underlying model/unwrapped model (i.e. model.module).\n state_dict (`dict`, *optional*, defaults to `None`):\n The state dict of the model. If not provided, the state dict of the model\n will be used.\n \"\"\"\n config = model.peft_config[adapter_name]\n if state_dict is None:\n state_dict = model.state_dict()\n if config.peft_type in (PeftType.LORA, PeftType.ADALORA):\n # to_return = lora_state_dict(model, bias=model.peft_config.bias)\n # adapted from `https://github.com/microsoft/LoRA/blob/main/loralib/utils.py`\n # to be used directly with the state dict which is necessary when using DeepSpeed or FSDP\n bias = config.bias\n if bias == \"none\":\n to_return = {k: state_dict[k] for k in state_dict if \"lora_\" in k}\n elif bias == \"all\":\n to_return = {k: state_dict[k] for k in state_dict if \"lora_\" in k or \"bias\" in k}\n elif bias == \"lora_only\":\n to_return = {}\n for k in state_dict:\n if \"lora_\" in k:\n to_return[k] = state_dict[k]\n bias_name = k.split(\"lora_\")[0] + \"bias\"\n if bias_name in state_dict:\n to_return[bias_name] = state_dict[bias_name]\n else:\n raise NotImplementedError\n to_return = {k: v for k, v in to_return.items() if ((\"lora_\" in k and adapter_name in k) or (\"bias\" in k))}\n if config.peft_type == PeftType.ADALORA:\n rank_pattern = config.rank_pattern\n if rank_pattern is not None:\n rank_pattern = {k.replace(f\".{adapter_name}\", \"\"): v for k, v in rank_pattern.items()}\n config.rank_pattern = rank_pattern\n to_return = model.resize_state_dict_by_rank_pattern(rank_pattern, to_return, adapter_name)\n elif isinstance(config, PromptLearningConfig):\n to_return = {}\n if config.inference_mode:\n prompt_embeddings = model.prompt_encoder[adapter_name].embedding.weight\n else:\n prompt_embeddings = model.get_prompt_embedding_to_save(adapter_name)\n to_return[\"prompt_embeddings\"] = prompt_embeddings\n else:\n raise NotImplementedError\n if model.modules_to_save is not None:\n for key, value in state_dict.items():\n if any(f\"{module_name}.modules_to_save.{adapter_name}\" in key for module_name in model.modules_to_save):\n to_return[key.replace(\"modules_to_save.\", \"\")] = value\n\n to_return = {k.replace(f\".{adapter_name}\", \"\"): v for k, v in to_return.items()}\n return to_return" }, { "identifier": "set_peft_model_state_dict", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/utils/save_and_load.py", "snippet": "def set_peft_model_state_dict(model, peft_model_state_dict, adapter_name=\"default\"):\n \"\"\"\n Set the state dict of the Peft model.\n\n Args:\n model ([`PeftModel`]): The Peft model.\n peft_model_state_dict (`dict`): The state dict of the Peft model.\n \"\"\"\n config = model.peft_config[adapter_name]\n state_dict = {}\n if model.modules_to_save is not None:\n for key, value in peft_model_state_dict.items():\n if any(module_name in key for module_name in model.modules_to_save):\n for module_name in model.modules_to_save:\n if module_name in key:\n key = key.replace(module_name, f\"{module_name}.modules_to_save.{adapter_name}\")\n break\n state_dict[key] = value\n else:\n state_dict = peft_model_state_dict\n\n #print(\"config.peft_type: \".format(config.peft_type))\n if config.peft_type in (PeftType.LORA, PeftType.ADALORA):\n peft_model_state_dict = {}\n for k, v in state_dict.items():\n if \"lora_\" in k:\n suffix = k.split(\"lora_\")[1]\n if \".\" in suffix:\n suffix_to_replace = \".\".join(suffix.split(\".\")[1:])\n k = 
k.replace(suffix_to_replace, f\"{adapter_name}.{suffix_to_replace}\")\n else:\n k = f\"{k}.{adapter_name}\"\n peft_model_state_dict[k] = v\n else:\n peft_model_state_dict[k] = v\n if config.peft_type == PeftType.ADALORA:\n rank_pattern = config.rank_pattern\n if rank_pattern is not None:\n model.resize_modules_by_rank_pattern(rank_pattern, adapter_name)\n elif isinstance(config, PromptLearningConfig):\n peft_model_state_dict = state_dict\n else:\n raise NotImplementedError\n\n model.load_state_dict(peft_model_state_dict, strict=False)\n #exit()\n if isinstance(config, PromptLearningConfig):\n model.prompt_encoder[adapter_name].embedding.load_state_dict(\n {\"weight\": peft_model_state_dict[\"prompt_embeddings\"]}, strict=True\n )" } ]
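A minimal, self-contained sketch of the LoRA key-renaming rule shown at the end of the context list above: get_peft_model_state_dict strips the adapter name from LoRA parameter names when saving, and set_peft_model_state_dict re-inserts it right after the lora_A / lora_B segment before loading. The helper function and example key below are illustrative stand-ins, not repository code:

# Sketch of the renaming branch in set_peft_model_state_dict: the adapter name
# is inserted right after the "lora_A"/"lora_B" segment of a saved key.
# The key and helper below are illustrative only.
def rename_lora_key(key: str, adapter_name: str = "default") -> str:
    if "lora_" not in key:
        return key
    suffix = key.split("lora_")[1]               # e.g. "A.weight"
    if "." in suffix:
        tail = ".".join(suffix.split(".")[1:])   # e.g. "weight"
        return key.replace(tail, f"{adapter_name}.{tail}")
    return f"{key}.{adapter_name}"


saved_key = "base_model.model.decoder.layers.0.self_attn.q_proj.lora_A.weight"
print(rename_lora_key(saved_key))
# base_model.model.decoder.layers.0.self_attn.q_proj.lora_A.default.weight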
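The same context list also shows PromptEmbedding's TEXT initialization, which repeats or truncates the tokenized init text so it exactly covers total_virtual_tokens before copying the word-embedding weights. A self-contained sketch of just that length-matching step, with made-up token ids:

import math

# Sketch of PromptEmbedding's TEXT-init length matching: repeat the init token
# ids until they cover total_virtual_tokens, then truncate. Toy ids below.
def fit_init_token_ids(init_token_ids: list[int], total_virtual_tokens: int) -> list[int]:
    num_text_tokens = len(init_token_ids)
    if num_text_tokens > total_virtual_tokens:
        init_token_ids = init_token_ids[:total_virtual_tokens]
    elif num_text_tokens < total_virtual_tokens:
        num_reps = math.ceil(total_virtual_tokens / num_text_tokens)
        init_token_ids = (init_token_ids * num_reps)[:total_virtual_tokens]
    return init_token_ids


print(fit_init_token_ids([11, 22, 33], 8))  # [11, 22, 33, 11, 22, 33, 11, 22]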
import inspect import os import warnings import torch from contextlib import contextmanager from accelerate import dispatch_model, infer_auto_device_map from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_submodules from accelerate.utils import get_balanced_memory from huggingface_hub import hf_hub_download from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers import PreTrainedModel from transformers.modeling_outputs import SequenceClassifierOutput, TokenClassifierOutput from transformers.utils import PushToHubMixin from .tuners import AdaLoraModel, LoraModel, PrefixEncoder, PromptEmbedding, PromptEncoder from .utils import ( TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, WEIGHTS_NAME, PeftConfig, PeftType, PromptLearningConfig, TaskType, _set_adapter, _set_trainable, get_peft_model_state_dict, set_peft_model_state_dict, shift_tokens_right, ) from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING, PEFT_TYPE_TO_CONFIG_MAPPING from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING
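Among the utilities imported above, shift_tokens_right (defined in full in the context list) is easy to sanity-check on a tiny tensor. The sketch below re-declares a simplified copy so it runs standalone; the example labels are made up:

import torch

# Simplified re-declaration of shift_tokens_right for a standalone check:
# prepend the decoder start token, shift the sequence right by one position,
# and replace any -100 label values with the pad token id.
def shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id):
    shifted = input_ids.new_zeros(input_ids.shape)
    shifted[:, 1:] = input_ids[:, :-1].clone()
    shifted[:, 0] = decoder_start_token_id
    shifted.masked_fill_(shifted == -100, pad_token_id)
    return shifted


labels = torch.tensor([[5, 6, 7, -100]])
print(shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=2))
# tensor([[2, 5, 6, 7]])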
11175
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. PEFT_TYPE_TO_MODEL_MAPPING = { PeftType.LORA: LoraModel, PeftType.PROMPT_TUNING: PromptEmbedding, PeftType.P_TUNING: PromptEncoder, PeftType.PREFIX_TUNING: PrefixEncoder,
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. PEFT_TYPE_TO_MODEL_MAPPING = { PeftType.LORA: LoraModel, PeftType.PROMPT_TUNING: PromptEmbedding, PeftType.P_TUNING: PromptEncoder, PeftType.PREFIX_TUNING: PrefixEncoder,
PeftType.ADALORA: AdaLoraModel,
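This next_line is the gold completion: it adds the ADALORA entry that finishes the PEFT_TYPE_TO_MODEL_MAPPING dictionary started in cropped_code / all_code above. A sketch of the completed mapping with lightweight stand-in classes (in the repository the values are the tuner classes imported in peft_model.py):

import enum

# PeftType as defined in the context list; the tuner classes below are
# illustrative stand-ins for LoraModel, PromptEmbedding, etc.
class PeftType(str, enum.Enum):
    PROMPT_TUNING = "PROMPT_TUNING"
    P_TUNING = "P_TUNING"
    PREFIX_TUNING = "PREFIX_TUNING"
    LORA = "LORA"
    ADALORA = "ADALORA"


class LoraModel: ...
class PromptEmbedding: ...
class PromptEncoder: ...
class PrefixEncoder: ...
class AdaLoraModel: ...


PEFT_TYPE_TO_MODEL_MAPPING = {
    PeftType.LORA: LoraModel,
    PeftType.PROMPT_TUNING: PromptEmbedding,
    PeftType.P_TUNING: PromptEncoder,
    PeftType.PREFIX_TUNING: PrefixEncoder,
    PeftType.ADALORA: AdaLoraModel,  # <- the gold completion (next_line)
}

# Dispatch a config's peft_type to its tuner class.
print(PEFT_TYPE_TO_MODEL_MAPPING[PeftType.ADALORA].__name__)  # AdaLoraModel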
1
2023-10-30 10:50:32+00:00
16k
chenran-li/RQL-release
sb3_contrib/tqc/tqc.py
[ { "identifier": "ReplayBuffer", "path": "stable_baselines3/common/buffers.py", "snippet": "class ReplayBuffer(BaseBuffer):\n \"\"\"\n Replay buffer used in off-policy algorithms like SAC/TD3.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param n_envs: Number of parallel environments\n :param optimize_memory_usage: Enable a memory efficient variant\n of the replay buffer which reduces by almost a factor two the memory used,\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n and https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274\n Cannot be used in combination with handle_timeout_termination.\n :param handle_timeout_termination: Handle timeout termination (due to timelimit)\n separately and treat the task as infinite horizon task.\n https://github.com/DLR-RM/stable-baselines3/issues/284\n \"\"\"\n\n def __init__(\n self,\n buffer_size: int,\n observation_space: spaces.Space,\n action_space: spaces.Space,\n device: Union[th.device, str] = \"auto\",\n n_envs: int = 1,\n optimize_memory_usage: bool = False,\n handle_timeout_termination: bool = True,\n ):\n super().__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)\n\n # Adjust buffer size\n self.buffer_size = max(buffer_size // n_envs, 1)\n\n # Check that the replay buffer can fit into the memory\n if psutil is not None:\n mem_available = psutil.virtual_memory().available\n\n # there is a bug if both optimize_memory_usage and handle_timeout_termination are true\n # see https://github.com/DLR-RM/stable-baselines3/issues/934\n if optimize_memory_usage and handle_timeout_termination:\n raise ValueError(\n \"ReplayBuffer does not support optimize_memory_usage = True \"\n \"and handle_timeout_termination = True simultaneously.\"\n )\n self.optimize_memory_usage = optimize_memory_usage\n\n self.observations = np.zeros((self.buffer_size, self.n_envs) + self.obs_shape, dtype=observation_space.dtype)\n\n if optimize_memory_usage:\n # `observations` contains also the next observation\n self.next_observations = None\n else:\n self.next_observations = np.zeros((self.buffer_size, self.n_envs) + self.obs_shape, dtype=observation_space.dtype)\n\n self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=action_space.dtype)\n\n self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.dones = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n # Handle timeouts termination properly if needed\n # see https://github.com/DLR-RM/stable-baselines3/issues/284\n self.handle_timeout_termination = handle_timeout_termination\n self.timeouts = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n\n if psutil is not None:\n total_memory_usage = self.observations.nbytes + self.actions.nbytes + self.rewards.nbytes + self.dones.nbytes\n\n if self.next_observations is not None:\n total_memory_usage += self.next_observations.nbytes\n\n if total_memory_usage > mem_available:\n # Convert to GB\n total_memory_usage /= 1e9\n mem_available /= 1e9\n warnings.warn(\n \"This system does not have apparently enough memory to store the complete \"\n f\"replay buffer {total_memory_usage:.2f}GB > {mem_available:.2f}GB\"\n )\n\n def add(\n self,\n obs: np.ndarray,\n next_obs: np.ndarray,\n action: np.ndarray,\n reward: np.ndarray,\n done: np.ndarray,\n infos: List[Dict[str, Any]],\n 
) -> None:\n\n # Reshape needed when using multiple envs with discrete observations\n # as numpy cannot broadcast (n_discrete,) to (n_discrete, 1)\n if isinstance(self.observation_space, spaces.Discrete):\n obs = obs.reshape((self.n_envs,) + self.obs_shape)\n next_obs = next_obs.reshape((self.n_envs,) + self.obs_shape)\n\n # Same, for actions\n action = action.reshape((self.n_envs, self.action_dim))\n\n # Copy to avoid modification by reference\n self.observations[self.pos] = np.array(obs).copy()\n\n if self.optimize_memory_usage:\n self.observations[(self.pos + 1) % self.buffer_size] = np.array(next_obs).copy()\n else:\n self.next_observations[self.pos] = np.array(next_obs).copy()\n\n self.actions[self.pos] = np.array(action).copy()\n self.rewards[self.pos] = np.array(reward).copy()\n self.dones[self.pos] = np.array(done).copy()\n\n if self.handle_timeout_termination:\n self.timeouts[self.pos] = np.array([info.get(\"TimeLimit.truncated\", False) for info in infos])\n\n self.pos += 1\n if self.pos == self.buffer_size:\n self.full = True\n self.pos = 0\n\n def sample(self, batch_size: int, env: Optional[VecNormalize] = None) -> ReplayBufferSamples:\n \"\"\"\n Sample elements from the replay buffer.\n Custom sampling when using memory efficient variant,\n as we should not sample the element with index `self.pos`\n See https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274\n\n :param batch_size: Number of element to sample\n :param env: associated gym VecEnv\n to normalize the observations/rewards when sampling\n :return:\n \"\"\"\n if not self.optimize_memory_usage:\n return super().sample(batch_size=batch_size, env=env)\n # Do not sample the element with index `self.pos` as the transitions is invalid\n # (we use only one array to store `obs` and `next_obs`)\n if self.full:\n batch_inds = (np.random.randint(1, self.buffer_size, size=batch_size) + self.pos) % self.buffer_size\n else:\n batch_inds = np.random.randint(0, self.pos, size=batch_size)\n return self._get_samples(batch_inds, env=env)\n\n def _get_samples(self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None) -> ReplayBufferSamples:\n # Sample randomly the env idx\n env_indices = np.random.randint(0, high=self.n_envs, size=(len(batch_inds),))\n\n if self.optimize_memory_usage:\n next_obs = self._normalize_obs(self.observations[(batch_inds + 1) % self.buffer_size, env_indices, :], env)\n else:\n next_obs = self._normalize_obs(self.next_observations[batch_inds, env_indices, :], env)\n\n data = (\n self._normalize_obs(self.observations[batch_inds, env_indices, :], env),\n self.actions[batch_inds, env_indices, :],\n next_obs,\n # Only use dones that are not due to timeouts\n # deactivated by default (timeouts is initialized as an array of False)\n (self.dones[batch_inds, env_indices] * (1 - self.timeouts[batch_inds, env_indices])).reshape(-1, 1),\n self._normalize_reward(self.rewards[batch_inds, env_indices].reshape(-1, 1), env),\n )\n return ReplayBufferSamples(*tuple(map(self.to_torch, data)))" }, { "identifier": "ActionNoise", "path": "stable_baselines3/common/noise.py", "snippet": "class ActionNoise(ABC):\n \"\"\"\n The action noise base class\n \"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n\n def reset(self) -> None:\n \"\"\"\n call end of episode reset for the noise\n \"\"\"\n pass\n\n @abstractmethod\n def __call__(self) -> np.ndarray:\n raise NotImplementedError()" }, { "identifier": "OffPolicyAlgorithm", "path": "stable_baselines3/common/off_policy_algorithm.py", "snippet": "class 
OffPolicyAlgorithm(BaseAlgorithm):\n \"\"\"\n The base for Off-Policy algorithms (ex: SAC/TD3)\n\n :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)\n :param env: The environment to learn from\n (if registered in Gym, can be str. Can be None for loading trained models)\n :param learning_rate: learning rate for the optimizer,\n it can be a function of the current progress remaining (from 1 to 0)\n :param buffer_size: size of the replay buffer\n :param learning_starts: how many steps of the model to collect transitions for before learning starts\n :param batch_size: Minibatch size for each gradient update\n :param tau: the soft update coefficient (\"Polyak update\", between 0 and 1)\n :param gamma: the discount factor\n :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit\n like ``(5, \"step\")`` or ``(2, \"episode\")``.\n :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``)\n Set to ``-1`` means to do as many gradient steps as steps done in the environment\n during the rollout.\n :param action_noise: the action noise type (None by default), this can help\n for hard exploration problem. Cf common.noise for the different action noise type.\n :param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``).\n If ``None``, it will be automatically selected.\n :param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation.\n :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n :param policy_kwargs: Additional arguments to be passed to the policy on creation\n :param tensorboard_log: the log location for tensorboard (if None, no logging)\n :param verbose: Verbosity level: 0 for no output, 1 for info messages (such as device or wrappers used), 2 for\n debug messages\n :param device: Device on which the code should run.\n By default, it will try to use a Cuda compatible device and fallback to cpu\n if it is not possible.\n :param support_multi_env: Whether the algorithm supports training\n with multiple environments (as in A2C)\n :param monitor_wrapper: When creating an environment, whether to wrap it\n or not in a Monitor wrapper.\n :param seed: Seed for the pseudo random generators\n :param use_sde: Whether to use State Dependent Exploration (SDE)\n instead of action noise exploration (default: False)\n :param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE\n Default: -1 (only sample at the beginning of the rollout)\n :param use_sde_at_warmup: Whether to use gSDE instead of uniform sampling\n during the warm up phase (before learning starts)\n :param sde_support: Whether the model support gSDE or not\n :param supported_action_spaces: The action spaces supported by the algorithm.\n \"\"\"\n\n def __init__(\n self,\n policy: Union[str, Type[BasePolicy]],\n env: Union[GymEnv, str],\n learning_rate: Union[float, Schedule],\n buffer_size: int = 1_000_000, # 1e6\n learning_starts: int = 100,\n batch_size: int = 256,\n tau: float = 0.005,\n gamma: float = 0.99,\n train_freq: Union[int, Tuple[int, str]] = (1, \"step\"),\n gradient_steps: int = 1,\n action_noise: Optional[ActionNoise] = None,\n replay_buffer_class: Optional[Type[ReplayBuffer]] = None,\n replay_buffer_kwargs: Optional[Dict[str, Any]] = None,\n optimize_memory_usage: bool = False,\n policy_kwargs: 
Optional[Dict[str, Any]] = None,\n tensorboard_log: Optional[str] = None,\n verbose: int = 0,\n device: Union[th.device, str] = \"auto\",\n support_multi_env: bool = False,\n monitor_wrapper: bool = True,\n seed: Optional[int] = None,\n use_sde: bool = False,\n sde_sample_freq: int = -1,\n use_sde_at_warmup: bool = False,\n sde_support: bool = True,\n supported_action_spaces: Optional[Tuple[spaces.Space, ...]] = None,\n ):\n\n super().__init__(\n policy=policy,\n env=env,\n learning_rate=learning_rate,\n policy_kwargs=policy_kwargs,\n tensorboard_log=tensorboard_log,\n verbose=verbose,\n device=device,\n support_multi_env=support_multi_env,\n monitor_wrapper=monitor_wrapper,\n seed=seed,\n use_sde=use_sde,\n sde_sample_freq=sde_sample_freq,\n supported_action_spaces=supported_action_spaces,\n )\n self.buffer_size = buffer_size\n self.batch_size = batch_size\n self.learning_starts = learning_starts\n self.tau = tau\n self.gamma = gamma\n self.gradient_steps = gradient_steps\n self.action_noise = action_noise\n self.optimize_memory_usage = optimize_memory_usage\n self.replay_buffer_class = replay_buffer_class\n if replay_buffer_kwargs is None:\n replay_buffer_kwargs = {}\n self.replay_buffer_kwargs = replay_buffer_kwargs\n self._episode_storage = None\n\n # Save train freq parameter, will be converted later to TrainFreq object\n self.train_freq = train_freq\n\n self.actor = None # type: Optional[th.nn.Module]\n self.replay_buffer = None # type: Optional[ReplayBuffer]\n # Update policy keyword arguments\n if sde_support:\n self.policy_kwargs[\"use_sde\"] = self.use_sde\n # For gSDE only\n self.use_sde_at_warmup = use_sde_at_warmup\n\n def _convert_train_freq(self) -> None:\n \"\"\"\n Convert `train_freq` parameter (int or tuple)\n to a TrainFreq object.\n \"\"\"\n if not isinstance(self.train_freq, TrainFreq):\n train_freq = self.train_freq\n\n # The value of the train frequency will be checked later\n if not isinstance(train_freq, tuple):\n train_freq = (train_freq, \"step\")\n\n try:\n train_freq = (train_freq[0], TrainFrequencyUnit(train_freq[1]))\n except ValueError as e:\n raise ValueError(\n f\"The unit of the `train_freq` must be either 'step' or 'episode' not '{train_freq[1]}'!\"\n ) from e\n\n if not isinstance(train_freq[0], int):\n raise ValueError(f\"The frequency of `train_freq` must be an integer and not {train_freq[0]}\")\n\n self.train_freq = TrainFreq(*train_freq)\n\n def _setup_model(self) -> None:\n self._setup_lr_schedule()\n self.set_random_seed(self.seed)\n\n # Use DictReplayBuffer if needed\n if self.replay_buffer_class is None:\n if isinstance(self.observation_space, spaces.Dict):\n self.replay_buffer_class = DictReplayBuffer\n else:\n self.replay_buffer_class = ReplayBuffer\n\n elif self.replay_buffer_class == HerReplayBuffer:\n assert self.env is not None, \"You must pass an environment when using `HerReplayBuffer`\"\n\n # If using offline sampling, we need a classic replay buffer too\n if self.replay_buffer_kwargs.get(\"online_sampling\", True):\n replay_buffer = None\n else:\n replay_buffer = DictReplayBuffer(\n self.buffer_size,\n self.observation_space,\n self.action_space,\n device=self.device,\n optimize_memory_usage=self.optimize_memory_usage,\n )\n\n self.replay_buffer = HerReplayBuffer(\n self.env,\n self.buffer_size,\n device=self.device,\n replay_buffer=replay_buffer,\n **self.replay_buffer_kwargs,\n )\n\n if self.replay_buffer is None:\n self.replay_buffer = self.replay_buffer_class(\n self.buffer_size,\n self.observation_space,\n self.action_space,\n 
device=self.device,\n n_envs=self.n_envs,\n optimize_memory_usage=self.optimize_memory_usage,\n **self.replay_buffer_kwargs,\n )\n\n self.policy = self.policy_class( # pytype:disable=not-instantiable\n self.observation_space,\n self.action_space,\n self.lr_schedule,\n **self.policy_kwargs, # pytype:disable=not-instantiable\n )\n self.policy = self.policy.to(self.device)\n\n # Convert train freq parameter to TrainFreq object\n self._convert_train_freq()\n\n def save_replay_buffer(self, path: Union[str, pathlib.Path, io.BufferedIOBase]) -> None:\n \"\"\"\n Save the replay buffer as a pickle file.\n\n :param path: Path to the file where the replay buffer should be saved.\n if path is a str or pathlib.Path, the path is automatically created if necessary.\n \"\"\"\n assert self.replay_buffer is not None, \"The replay buffer is not defined\"\n save_to_pkl(path, self.replay_buffer, self.verbose)\n\n def load_replay_buffer(\n self,\n path: Union[str, pathlib.Path, io.BufferedIOBase],\n truncate_last_traj: bool = True,\n ) -> None:\n \"\"\"\n Load a replay buffer from a pickle file.\n\n :param path: Path to the pickled replay buffer.\n :param truncate_last_traj: When using ``HerReplayBuffer`` with online sampling:\n If set to ``True``, we assume that the last trajectory in the replay buffer was finished\n (and truncate it).\n If set to ``False``, we assume that we continue the same trajectory (same episode).\n \"\"\"\n self.replay_buffer = load_from_pkl(path, self.verbose)\n assert isinstance(self.replay_buffer, ReplayBuffer), \"The replay buffer must inherit from ReplayBuffer class\"\n\n # Backward compatibility with SB3 < 2.1.0 replay buffer\n # Keep old behavior: do not handle timeout termination separately\n if not hasattr(self.replay_buffer, \"handle_timeout_termination\"): # pragma: no cover\n self.replay_buffer.handle_timeout_termination = False\n self.replay_buffer.timeouts = np.zeros_like(self.replay_buffer.dones)\n\n if isinstance(self.replay_buffer, HerReplayBuffer):\n assert self.env is not None, \"You must pass an environment at load time when using `HerReplayBuffer`\"\n self.replay_buffer.set_env(self.get_env())\n if truncate_last_traj:\n self.replay_buffer.truncate_last_trajectory()\n\n def _setup_learn(\n self,\n total_timesteps: int,\n callback: MaybeCallback = None,\n reset_num_timesteps: bool = True,\n tb_log_name: str = \"run\",\n progress_bar: bool = False,\n ) -> Tuple[int, BaseCallback]:\n \"\"\"\n cf `BaseAlgorithm`.\n \"\"\"\n # Prevent continuity issue by truncating trajectory\n # when using memory efficient replay buffer\n # see https://github.com/DLR-RM/stable-baselines3/issues/46\n\n # Special case when using HerReplayBuffer,\n # the classic replay buffer is inside it when using offline sampling\n if isinstance(self.replay_buffer, HerReplayBuffer):\n replay_buffer = self.replay_buffer.replay_buffer\n else:\n replay_buffer = self.replay_buffer\n\n truncate_last_traj = (\n self.optimize_memory_usage\n and reset_num_timesteps\n and replay_buffer is not None\n and (replay_buffer.full or replay_buffer.pos > 0)\n )\n\n if truncate_last_traj:\n warnings.warn(\n \"The last trajectory in the replay buffer will be truncated, \"\n \"see https://github.com/DLR-RM/stable-baselines3/issues/46.\"\n \"You should use `reset_num_timesteps=False` or `optimize_memory_usage=False`\"\n \"to avoid that issue.\"\n )\n # Go to the previous index\n pos = (replay_buffer.pos - 1) % replay_buffer.buffer_size\n replay_buffer.dones[pos] = True\n\n return super()._setup_learn(\n total_timesteps,\n 
callback,\n reset_num_timesteps,\n tb_log_name,\n progress_bar,\n )\n\n def learn(\n self: SelfOffPolicyAlgorithm,\n total_timesteps: int,\n callback: MaybeCallback = None,\n log_interval: int = 4,\n tb_log_name: str = \"run\",\n reset_num_timesteps: bool = True,\n progress_bar: bool = False,\n ) -> SelfOffPolicyAlgorithm:\n\n total_timesteps, callback = self._setup_learn(\n total_timesteps,\n callback,\n reset_num_timesteps,\n tb_log_name,\n progress_bar,\n )\n\n callback.on_training_start(locals(), globals())\n\n while self.num_timesteps < total_timesteps:\n rollout = self.collect_rollouts(\n self.env,\n train_freq=self.train_freq,\n action_noise=self.action_noise,\n callback=callback,\n learning_starts=self.learning_starts,\n replay_buffer=self.replay_buffer,\n log_interval=log_interval,\n )\n\n if rollout.continue_training is False:\n break\n\n if self.num_timesteps > 0 and self.num_timesteps > self.learning_starts:\n # If no `gradient_steps` is specified,\n # do as many gradients steps as steps performed during the rollout\n gradient_steps = self.gradient_steps if self.gradient_steps >= 0 else rollout.episode_timesteps\n # Special case when the user passes `gradient_steps=0`\n if gradient_steps > 0:\n self.train(batch_size=self.batch_size, gradient_steps=gradient_steps)\n\n callback.on_training_end()\n\n return self\n\n def train(self, gradient_steps: int, batch_size: int) -> None:\n \"\"\"\n Sample the replay buffer and do the updates\n (gradient descent and update target networks)\n \"\"\"\n raise NotImplementedError()\n\n def _sample_action(\n self,\n learning_starts: int,\n action_noise: Optional[ActionNoise] = None,\n n_envs: int = 1,\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Sample an action according to the exploration policy.\n This is either done by sampling the probability distribution of the policy,\n or sampling a random action (from a uniform distribution over the action space)\n or by adding noise to the deterministic output.\n\n :param action_noise: Action noise that will be used for exploration\n Required for deterministic policy (e.g. TD3). 
This can also be used\n in addition to the stochastic policy for SAC.\n :param learning_starts: Number of steps before learning for the warm-up phase.\n :param n_envs:\n :return: action to take in the environment\n and scaled action that will be stored in the replay buffer.\n The two differs when the action space is not normalized (bounds are not [-1, 1]).\n \"\"\"\n # Select action randomly or according to policy\n if self.num_timesteps < learning_starts and not (self.use_sde and self.use_sde_at_warmup):\n # Warmup phase\n unscaled_action = np.array([self.action_space.sample() for _ in range(n_envs)])\n else:\n # Note: when using continuous actions,\n # we assume that the policy uses tanh to scale the action\n # We use non-deterministic action in the case of SAC, for TD3, it does not matter\n unscaled_action, _ = self.predict(self._last_obs, deterministic=False)\n\n # Rescale the action from [low, high] to [-1, 1]\n if isinstance(self.action_space, spaces.Box):\n scaled_action = self.policy.scale_action(unscaled_action)\n\n # Add noise to the action (improve exploration)\n if action_noise is not None:\n scaled_action = np.clip(scaled_action + action_noise(), -1, 1)\n\n # We store the scaled action in the buffer\n buffer_action = scaled_action\n action = self.policy.unscale_action(scaled_action)\n else:\n # Discrete case, no need to normalize or clip\n buffer_action = unscaled_action\n action = buffer_action\n return action, buffer_action\n\n def _dump_logs(self) -> None:\n \"\"\"\n Write log.\n \"\"\"\n time_elapsed = max((time.time_ns() - self.start_time) / 1e9, sys.float_info.epsilon)\n fps = int((self.num_timesteps - self._num_timesteps_at_start) / time_elapsed)\n self.logger.record(\"time/episodes\", self._episode_num, exclude=\"tensorboard\")\n if len(self.ep_info_buffer) > 0 and len(self.ep_info_buffer[0]) > 0:\n self.logger.record(\"rollout/ep_rew_mean\", safe_mean([ep_info[\"r\"] for ep_info in self.ep_info_buffer]))\n self.logger.record(\"rollout/ep_len_mean\", safe_mean([ep_info[\"l\"] for ep_info in self.ep_info_buffer]))\n self.logger.record(\"time/fps\", fps)\n self.logger.record(\"time/time_elapsed\", int(time_elapsed), exclude=\"tensorboard\")\n self.logger.record(\"time/total_timesteps\", self.num_timesteps, exclude=\"tensorboard\")\n if self.use_sde:\n self.logger.record(\"train/std\", (self.actor.get_std()).mean().item())\n\n if len(self.ep_success_buffer) > 0:\n self.logger.record(\"rollout/success_rate\", safe_mean(self.ep_success_buffer))\n # Pass the number of timesteps for tensorboard\n self.logger.dump(step=self.num_timesteps)\n\n def _on_step(self) -> None:\n \"\"\"\n Method called after each step in the environment.\n It is meant to trigger DQN target network update\n but can be used for other purposes\n \"\"\"\n pass\n\n def _store_transition(\n self,\n replay_buffer: ReplayBuffer,\n buffer_action: np.ndarray,\n new_obs: Union[np.ndarray, Dict[str, np.ndarray]],\n reward: np.ndarray,\n dones: np.ndarray,\n infos: List[Dict[str, Any]],\n ) -> None:\n \"\"\"\n Store transition in the replay buffer.\n We store the normalized action and the unnormalized observation.\n It also handles terminal observations (because VecEnv resets automatically).\n\n :param replay_buffer: Replay buffer object where to store the transition.\n :param buffer_action: normalized action\n :param new_obs: next observation in the current episode\n or first observation of the episode (when dones is True)\n :param reward: reward for the current transition\n :param dones: Termination signal\n 
:param infos: List of additional information about the transition.\n It may contain the terminal observations and information about timeout.\n \"\"\"\n # Store only the unnormalized version\n if self._vec_normalize_env is not None:\n new_obs_ = self._vec_normalize_env.get_original_obs()\n reward_ = self._vec_normalize_env.get_original_reward()\n else:\n # Avoid changing the original ones\n self._last_original_obs, new_obs_, reward_ = self._last_obs, new_obs, reward\n\n # Avoid modification by reference\n next_obs = deepcopy(new_obs_)\n # As the VecEnv resets automatically, new_obs is already the\n # first observation of the next episode\n for i, done in enumerate(dones):\n if done and infos[i].get(\"terminal_observation\") is not None:\n if isinstance(next_obs, dict):\n next_obs_ = infos[i][\"terminal_observation\"]\n # VecNormalize normalizes the terminal observation\n if self._vec_normalize_env is not None:\n next_obs_ = self._vec_normalize_env.unnormalize_obs(next_obs_)\n # Replace next obs for the correct envs\n for key in next_obs.keys():\n next_obs[key][i] = next_obs_[key]\n else:\n next_obs[i] = infos[i][\"terminal_observation\"]\n # VecNormalize normalizes the terminal observation\n if self._vec_normalize_env is not None:\n next_obs[i] = self._vec_normalize_env.unnormalize_obs(next_obs[i, :])\n\n replay_buffer.add(\n self._last_original_obs,\n next_obs,\n buffer_action,\n reward_,\n dones,\n infos,\n )\n\n self._last_obs = new_obs\n # Save the unnormalized observation\n if self._vec_normalize_env is not None:\n self._last_original_obs = new_obs_\n\n def collect_rollouts(\n self,\n env: VecEnv,\n callback: BaseCallback,\n train_freq: TrainFreq,\n replay_buffer: ReplayBuffer,\n action_noise: Optional[ActionNoise] = None,\n learning_starts: int = 0,\n log_interval: Optional[int] = None,\n ) -> RolloutReturn:\n \"\"\"\n Collect experiences and store them into a ``ReplayBuffer``.\n\n :param env: The training environment\n :param callback: Callback that will be called at each step\n (and at the beginning and end of the rollout)\n :param train_freq: How much experience to collect\n by doing rollouts of current policy.\n Either ``TrainFreq(<n>, TrainFrequencyUnit.STEP)``\n or ``TrainFreq(<n>, TrainFrequencyUnit.EPISODE)``\n with ``<n>`` being an integer greater than 0.\n :param action_noise: Action noise that will be used for exploration\n Required for deterministic policy (e.g. TD3). 
This can also be used\n in addition to the stochastic policy for SAC.\n :param learning_starts: Number of steps before learning for the warm-up phase.\n :param replay_buffer:\n :param log_interval: Log data every ``log_interval`` episodes\n :return:\n \"\"\"\n # Switch to eval mode (this affects batch norm / dropout)\n self.policy.set_training_mode(False)\n\n num_collected_steps, num_collected_episodes = 0, 0\n\n assert isinstance(env, VecEnv), \"You must pass a VecEnv\"\n assert train_freq.frequency > 0, \"Should at least collect one step or episode.\"\n\n if env.num_envs > 1:\n assert train_freq.unit == TrainFrequencyUnit.STEP, \"You must use only one env when doing episodic training.\"\n\n # Vectorize action noise if needed\n if action_noise is not None and env.num_envs > 1 and not isinstance(action_noise, VectorizedActionNoise):\n action_noise = VectorizedActionNoise(action_noise, env.num_envs)\n\n if self.use_sde:\n self.actor.reset_noise(env.num_envs)\n\n callback.on_rollout_start()\n continue_training = True\n\n while should_collect_more_steps(train_freq, num_collected_steps, num_collected_episodes):\n if self.use_sde and self.sde_sample_freq > 0 and num_collected_steps % self.sde_sample_freq == 0:\n # Sample a new noise matrix\n self.actor.reset_noise(env.num_envs)\n\n # Select action randomly or according to policy\n actions, buffer_actions = self._sample_action(learning_starts, action_noise, env.num_envs)\n\n # Rescale and perform action\n new_obs, rewards, dones, infos = env.step(actions)\n\n self.num_timesteps += env.num_envs\n num_collected_steps += 1\n\n # Give access to local variables\n callback.update_locals(locals())\n # Only stop training if return value is False, not when it is None.\n if callback.on_step() is False:\n return RolloutReturn(num_collected_steps * env.num_envs, num_collected_episodes, continue_training=False)\n\n # Retrieve reward and episode length if using Monitor wrapper\n self._update_info_buffer(infos, dones)\n\n # Store data in replay buffer (normalized action and unnormalized observation)\n self._store_transition(replay_buffer, buffer_actions, new_obs, rewards, dones, infos)\n\n self._update_current_progress_remaining(self.num_timesteps, self._total_timesteps)\n\n # For DQN, check if the target network should be updated\n # and update the exploration schedule\n # For SAC/TD3, the update is dones as the same time as the gradient update\n # see https://github.com/hill-a/stable-baselines/issues/900\n self._on_step()\n\n for idx, done in enumerate(dones):\n if done:\n # Update stats\n num_collected_episodes += 1\n self._episode_num += 1\n\n if action_noise is not None:\n kwargs = dict(indices=[idx]) if env.num_envs > 1 else {}\n action_noise.reset(**kwargs)\n\n # Log training infos\n if log_interval is not None and self._episode_num % log_interval == 0:\n self._dump_logs()\n callback.on_rollout_end()\n\n return RolloutReturn(num_collected_steps * env.num_envs, num_collected_episodes, continue_training)" }, { "identifier": "BasePolicy", "path": "stable_baselines3/common/policies.py", "snippet": "class BasePolicy(BaseModel, ABC):\n \"\"\"The base policy object.\n\n Parameters are mostly the same as `BaseModel`; additions are documented below.\n\n :param args: positional arguments passed through to `BaseModel`.\n :param kwargs: keyword arguments passed through to `BaseModel`.\n :param squash_output: For continuous actions, whether the output is squashed\n or not using a ``tanh()`` function.\n \"\"\"\n\n def __init__(self, *args, squash_output: bool = 
False, **kwargs):\n super().__init__(*args, **kwargs)\n self._squash_output = squash_output\n\n @staticmethod\n def _dummy_schedule(progress_remaining: float) -> float:\n \"\"\"(float) Useful for pickling policy.\"\"\"\n del progress_remaining\n return 0.0\n\n @property\n def squash_output(self) -> bool:\n \"\"\"(bool) Getter for squash_output.\"\"\"\n return self._squash_output\n\n @staticmethod\n def init_weights(module: nn.Module, gain: float = 1) -> None:\n \"\"\"\n Orthogonal initialization (used in PPO and A2C)\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Conv2d)):\n nn.init.orthogonal_(module.weight, gain=gain)\n if module.bias is not None:\n module.bias.data.fill_(0.0)\n\n @abstractmethod\n def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:\n \"\"\"\n Get the action according to the policy for a given observation.\n\n By default provides a dummy implementation -- not all BasePolicy classes\n implement this, e.g. if they are a Critic in an Actor-Critic method.\n\n :param observation:\n :param deterministic: Whether to use stochastic or deterministic actions\n :return: Taken action according to the policy\n \"\"\"\n\n def predict(\n self,\n observation: Union[np.ndarray, Dict[str, np.ndarray]],\n state: Optional[Tuple[np.ndarray, ...]] = None,\n episode_start: Optional[np.ndarray] = None,\n deterministic: bool = False,\n ) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:\n \"\"\"\n Get the policy action from an observation (and optional hidden state).\n Includes sugar-coating to handle different observations (e.g. normalizing images).\n\n :param observation: the input observation\n :param state: The last hidden states (can be None, used in recurrent policies)\n :param episode_start: The last masks (can be None, used in recurrent policies)\n this correspond to beginning of episodes,\n where the hidden states of the RNN must be reset.\n :param deterministic: Whether or not to return deterministic actions.\n :return: the model's action and the next hidden state\n (used in recurrent policies)\n \"\"\"\n # TODO (GH/1): add support for RNN policies\n # if state is None:\n # state = self.initial_state\n # if episode_start is None:\n # episode_start = [False for _ in range(self.n_envs)]\n # Switch to eval mode (this affects batch norm / dropout)\n self.set_training_mode(False)\n\n observation, vectorized_env = self.obs_to_tensor(observation)\n\n with th.no_grad():\n actions = self._predict(observation, deterministic=deterministic)\n # Convert to numpy, and reshape to the original action shape\n actions = actions.cpu().numpy().reshape((-1,) + self.action_space.shape)\n\n if isinstance(self.action_space, spaces.Box):\n if self.squash_output:\n # Rescale to proper domain when using squashing\n actions = self.unscale_action(actions)\n else:\n # Actions could be on arbitrary scale, so clip the actions to avoid\n # out of bound error (e.g. 
if sampling from a Gaussian distribution)\n actions = np.clip(actions, self.action_space.low, self.action_space.high)\n\n # Remove batch dimension if needed\n if not vectorized_env:\n actions = actions.squeeze(axis=0)\n\n return actions, state\n\n def scale_action(self, action: np.ndarray) -> np.ndarray:\n \"\"\"\n Rescale the action from [low, high] to [-1, 1]\n (no need for symmetric action space)\n\n :param action: Action to scale\n :return: Scaled action\n \"\"\"\n low, high = self.action_space.low, self.action_space.high\n return 2.0 * ((action - low) / (high - low)) - 1.0\n\n def unscale_action(self, scaled_action: np.ndarray) -> np.ndarray:\n \"\"\"\n Rescale the action from [-1, 1] to [low, high]\n (no need for symmetric action space)\n\n :param scaled_action: Action to un-scale\n \"\"\"\n low, high = self.action_space.low, self.action_space.high\n return low + (0.5 * (scaled_action + 1.0) * (high - low))" }, { "identifier": "GymEnv", "path": "stable_baselines3/common/type_aliases.py", "snippet": "class RolloutBufferSamples(NamedTuple):\nclass DictRolloutBufferSamples(NamedTuple):\nclass ReplayBufferSamples(NamedTuple):\nclass DictReplayBufferSamples(NamedTuple):\nclass RolloutReturn(NamedTuple):\nclass TrainFrequencyUnit(Enum):\nclass TrainFreq(NamedTuple):\nclass PolicyPredictor(Protocol):\n STEP = \"step\"\n EPISODE = \"episode\"\n def predict(\n self,\n observation: Union[np.ndarray, Dict[str, np.ndarray]],\n state: Optional[Tuple[np.ndarray, ...]] = None,\n episode_start: Optional[np.ndarray] = None,\n deterministic: bool = False,\n ) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:" }, { "identifier": "get_parameters_by_name", "path": "stable_baselines3/common/utils.py", "snippet": "def get_parameters_by_name(model: th.nn.Module, included_names: Iterable[str]) -> List[th.Tensor]:\n \"\"\"\n Extract parameters from the state dict of ``model``\n if the name contains one of the strings in ``included_names``.\n\n :param model: the model where the parameters come from.\n :param included_names: substrings of names to include.\n :return: List of parameters values (Pytorch tensors)\n that matches the queried names.\n \"\"\"\n return [param for name, param in model.state_dict().items() if any([key in name for key in included_names])]" }, { "identifier": "polyak_update", "path": "stable_baselines3/common/utils.py", "snippet": "def polyak_update(\n params: Iterable[th.Tensor],\n target_params: Iterable[th.Tensor],\n tau: float,\n) -> None:\n \"\"\"\n Perform a Polyak average update on ``target_params`` using ``params``:\n target parameters are slowly updated towards the main parameters.\n ``tau``, the soft update coefficient controls the interpolation:\n ``tau=1`` corresponds to copying the parameters to the target ones whereas nothing happens when ``tau=0``.\n The Polyak update is done in place, with ``no_grad``, and therefore does not create intermediate tensors,\n or a computation graph, reducing memory cost and improving performance. 
We scale the target params\n by ``1-tau`` (in-place), add the new weights, scaled by ``tau`` and store the result of the sum in the target\n params (in place).\n See https://github.com/DLR-RM/stable-baselines3/issues/93\n\n :param params: parameters to use to update the target params\n :param target_params: parameters to update\n :param tau: the soft update coefficient (\"Polyak update\", between 0 and 1)\n \"\"\"\n with th.no_grad():\n # zip does not raise an exception if length of parameters does not match.\n for param, target_param in zip_strict(params, target_params):\n target_param.data.mul_(1 - tau)\n th.add(target_param.data, param.data, alpha=tau, out=target_param.data)" }, { "identifier": "quantile_huber_loss", "path": "sb3_contrib/common/utils.py", "snippet": "def quantile_huber_loss(\n current_quantiles: th.Tensor,\n target_quantiles: th.Tensor,\n cum_prob: Optional[th.Tensor] = None,\n sum_over_quantiles: bool = True,\n) -> th.Tensor:\n \"\"\"\n The quantile-regression loss, as described in the QR-DQN and TQC papers.\n Partially taken from https://github.com/bayesgroup/tqc_pytorch.\n\n :param current_quantiles: current estimate of quantiles, must be either\n (batch_size, n_quantiles) or (batch_size, n_critics, n_quantiles)\n :param target_quantiles: target of quantiles, must be either (batch_size, n_target_quantiles),\n (batch_size, 1, n_target_quantiles), or (batch_size, n_critics, n_target_quantiles)\n :param cum_prob: cumulative probabilities to calculate quantiles (also called midpoints in QR-DQN paper),\n must be either (batch_size, n_quantiles), (batch_size, 1, n_quantiles), or (batch_size, n_critics, n_quantiles).\n (if None, calculating unit quantiles)\n :param sum_over_quantiles: if summing over the quantile dimension or not\n :return: the loss\n \"\"\"\n if current_quantiles.ndim != target_quantiles.ndim:\n raise ValueError(\n f\"Error: The dimension of curremt_quantile ({current_quantiles.ndim}) needs to match \"\n f\"the dimension of target_quantiles ({target_quantiles.ndim}).\"\n )\n if current_quantiles.shape[0] != target_quantiles.shape[0]:\n raise ValueError(\n f\"Error: The batch size of curremt_quantile ({current_quantiles.shape[0]}) needs to match \"\n f\"the batch size of target_quantiles ({target_quantiles.shape[0]}).\"\n )\n if current_quantiles.ndim not in (2, 3):\n raise ValueError(f\"Error: The dimension of current_quantiles ({current_quantiles.ndim}) needs to be either 2 or 3.\")\n\n if cum_prob is None:\n n_quantiles = current_quantiles.shape[-1]\n # Cumulative probabilities to calculate quantiles.\n cum_prob = (th.arange(n_quantiles, device=current_quantiles.device, dtype=th.float) + 0.5) / n_quantiles\n if current_quantiles.ndim == 2:\n # For QR-DQN, current_quantiles have a shape (batch_size, n_quantiles), and make cum_prob\n # broadcastable to (batch_size, n_quantiles, n_target_quantiles)\n cum_prob = cum_prob.view(1, -1, 1)\n elif current_quantiles.ndim == 3:\n # For TQC, current_quantiles have a shape (batch_size, n_critics, n_quantiles), and make cum_prob\n # broadcastable to (batch_size, n_critics, n_quantiles, n_target_quantiles)\n cum_prob = cum_prob.view(1, 1, -1, 1)\n\n # QR-DQN\n # target_quantiles: (batch_size, n_target_quantiles) -> (batch_size, 1, n_target_quantiles)\n # current_quantiles: (batch_size, n_quantiles) -> (batch_size, n_quantiles, 1)\n # pairwise_delta: (batch_size, n_target_quantiles, n_quantiles)\n # TQC\n # target_quantiles: (batch_size, 1, n_target_quantiles) -> (batch_size, 1, 1, n_target_quantiles)\n # 
current_quantiles: (batch_size, n_critics, n_quantiles) -> (batch_size, n_critics, n_quantiles, 1)\n # pairwise_delta: (batch_size, n_critics, n_quantiles, n_target_quantiles)\n # Note: in both cases, the loss has the same shape as pairwise_delta\n pairwise_delta = target_quantiles.unsqueeze(-2) - current_quantiles.unsqueeze(-1)\n abs_pairwise_delta = th.abs(pairwise_delta)\n huber_loss = th.where(abs_pairwise_delta > 1, abs_pairwise_delta - 0.5, pairwise_delta**2 * 0.5)\n loss = th.abs(cum_prob - (pairwise_delta.detach() < 0).float()) * huber_loss\n if sum_over_quantiles:\n loss = loss.sum(dim=-2).mean()\n else:\n loss = loss.mean()\n return loss" }, { "identifier": "CnnPolicy", "path": "sb3_contrib/tqc/policies.py", "snippet": "LOG_STD_MAX = 2\nLOG_STD_MIN = -20\nclass Actor(BasePolicy):\nclass Critic(BaseModel):\nclass TQCPolicy(BasePolicy):\nclass CnnPolicy(TQCPolicy):\nclass MultiInputPolicy(TQCPolicy):\n def __init__(\n self,\n observation_space: spaces.Space,\n action_space: spaces.Space,\n net_arch: List[int],\n features_extractor: nn.Module,\n features_dim: int,\n activation_fn: Type[nn.Module] = nn.ReLU,\n use_sde: bool = False,\n log_std_init: float = -3,\n full_std: bool = True,\n use_expln: bool = False,\n clip_mean: float = 2.0,\n normalize_images: bool = True,\n ):\n def _get_constructor_parameters(self) -> Dict[str, Any]:\n def get_std(self) -> th.Tensor:\n def reset_noise(self, batch_size: int = 1) -> None:\n def get_action_dist_params(self, obs: th.Tensor) -> Tuple[th.Tensor, th.Tensor, Dict[str, th.Tensor]]:\n def forward(self, obs: th.Tensor, deterministic: bool = False) -> th.Tensor:\n def action_log_prob(self, obs: th.Tensor) -> Tuple[th.Tensor, th.Tensor]:\n def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:\n def __init__(\n self,\n observation_space: spaces.Space,\n action_space: spaces.Space,\n net_arch: List[int],\n features_extractor: nn.Module,\n features_dim: int,\n activation_fn: Type[nn.Module] = nn.ReLU,\n normalize_images: bool = True,\n n_quantiles: int = 25,\n n_critics: int = 2,\n share_features_extractor: bool = False,\n ):\n def forward(self, obs: th.Tensor, action: th.Tensor) -> List[th.Tensor]:\n def __init__(\n self,\n observation_space: spaces.Space,\n action_space: spaces.Space,\n lr_schedule: Schedule,\n net_arch: Optional[Union[List[int], Dict[str, List[int]]]] = None,\n activation_fn: Type[nn.Module] = nn.ReLU,\n use_sde: bool = False,\n log_std_init: float = -3,\n use_expln: bool = False,\n clip_mean: float = 2.0,\n features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,\n features_extractor_kwargs: Optional[Dict[str, Any]] = None,\n normalize_images: bool = True,\n optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,\n optimizer_kwargs: Optional[Dict[str, Any]] = None,\n n_quantiles: int = 25,\n n_critics: int = 2,\n share_features_extractor: bool = False,\n ):\n def _build(self, lr_schedule: Schedule) -> None:\n def _get_constructor_parameters(self) -> Dict[str, Any]:\n def reset_noise(self, batch_size: int = 1) -> None:\n def make_actor(self, features_extractor: Optional[BaseFeaturesExtractor] = None) -> Actor:\n def make_critic(self, features_extractor: Optional[BaseFeaturesExtractor] = None) -> Critic:\n def forward(self, obs: th.Tensor, deterministic: bool = False) -> th.Tensor:\n def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:\n def set_training_mode(self, mode: bool) -> None:\n def __init__(\n self,\n observation_space: spaces.Space,\n 
action_space: spaces.Space,\n lr_schedule: Schedule,\n net_arch: Optional[Union[List[int], Dict[str, List[int]]]] = None,\n activation_fn: Type[nn.Module] = nn.ReLU,\n use_sde: bool = False,\n log_std_init: float = -3,\n use_expln: bool = False,\n clip_mean: float = 2.0,\n features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,\n features_extractor_kwargs: Optional[Dict[str, Any]] = None,\n normalize_images: bool = True,\n optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,\n optimizer_kwargs: Optional[Dict[str, Any]] = None,\n n_quantiles: int = 25,\n n_critics: int = 2,\n share_features_extractor: bool = False,\n ):\n def __init__(\n self,\n observation_space: spaces.Space,\n action_space: spaces.Space,\n lr_schedule: Schedule,\n net_arch: Optional[Union[List[int], Dict[str, List[int]]]] = None,\n activation_fn: Type[nn.Module] = nn.ReLU,\n use_sde: bool = False,\n log_std_init: float = -3,\n use_expln: bool = False,\n clip_mean: float = 2.0,\n features_extractor_class: Type[BaseFeaturesExtractor] = CombinedExtractor,\n features_extractor_kwargs: Optional[Dict[str, Any]] = None,\n normalize_images: bool = True,\n optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,\n optimizer_kwargs: Optional[Dict[str, Any]] = None,\n n_quantiles: int = 25,\n n_critics: int = 2,\n share_features_extractor: bool = False,\n ):" } ]
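The `polyak_update` snippet in the context above spells out the soft target-network update (`target <- (1 - tau) * target + tau * online`). A minimal sketch of that update, assuming `stable-baselines3` and PyTorch are installed; the toy parameter lists below are illustrative and not part of the record:

import torch as th
from stable_baselines3.common.utils import polyak_update

online = [th.ones(3)]    # stand-in for the online network's parameters
target = [th.zeros(3)]   # stand-in for the target network's parameters

# In-place soft update: target <- (1 - tau) * target + tau * online
polyak_update(online, target, tau=0.005)
print(target[0])  # tensor([0.0050, 0.0050, 0.0050])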
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, TypeVar, Union
from gym import spaces
from stable_baselines3.common.buffers import ReplayBuffer
from stable_baselines3.common.noise import ActionNoise
from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
from stable_baselines3.common.policies import BasePolicy
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback
from stable_baselines3.common.utils import get_parameters_by_name, polyak_update
from sb3_contrib.common.utils import quantile_huber_loss
from sb3_contrib.tqc.policies import CnnPolicy, MlpPolicy, MultiInputPolicy, TQCPolicy
import numpy as np
import torch as th
13267
SelfTQC = TypeVar("SelfTQC", bound="TQC") class TQC(OffPolicyAlgorithm): """ Controlling Overestimation Bias with Truncated Mixture of Continuous Distributional Quantile Critics. Paper: https://arxiv.org/abs/2005.04269 This implementation uses SB3 SAC implementation as base. :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...) :param env: The environment to learn from (if registered in Gym, can be str) :param learning_rate: learning rate for adam optimizer, the same learning rate will be used for all networks (Q-Values, Actor and Value function) it can be a function of the current progress remaining (from 1 to 0) :param buffer_size: size of the replay buffer :param learning_starts: how many steps of the model to collect transitions for before learning starts :param batch_size: Minibatch size for each gradient update :param tau: the soft update coefficient ("Polyak update", between 0 and 1) :param gamma: the discount factor :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit like ``(5, "step")`` or ``(2, "episode")``. :param gradient_steps: How many gradient update after each step :param action_noise: the action noise type (None by default), this can help for hard exploration problem. Cf common.noise for the different action noise type. :param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``). If ``None``, it will be automatically selected. :param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation. :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer at a cost of more complexity. See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195 :param ent_coef: Entropy regularization coefficient. (Equivalent to inverse of reward scale in the original SAC paper.) Controlling exploration/exploitation trade-off. Set it to 'auto' to learn it automatically (and 'auto_0.1' for using 0.1 as initial value) :param target_update_interval: update the target network every ``target_network_update_freq`` gradient steps. :param target_entropy: target entropy when learning ``ent_coef`` (``ent_coef = 'auto'``) :param top_quantiles_to_drop_per_net: Number of quantiles to drop per network :param use_sde: Whether to use generalized State Dependent Exploration (gSDE) instead of action noise exploration (default: False) :param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE Default: -1 (only sample at the beginning of the rollout) :param use_sde_at_warmup: Whether to use gSDE instead of uniform sampling during the warm up phase (before learning starts) :param policy_kwargs: additional arguments to be passed to the policy on creation :param verbose: the verbosity level: 0 no output, 1 info, 2 debug :param seed: Seed for the pseudo random generators :param device: Device (cpu, cuda, ...) on which the code should be run. Setting it to auto, the code will be run on the GPU if possible. :param _init_setup_model: Whether or not to build the network at the creation of the instance """ policy_aliases: Dict[str, Type[BasePolicy]] = { "MlpPolicy": MlpPolicy,
SelfTQC = TypeVar("SelfTQC", bound="TQC") class TQC(OffPolicyAlgorithm): """ Controlling Overestimation Bias with Truncated Mixture of Continuous Distributional Quantile Critics. Paper: https://arxiv.org/abs/2005.04269 This implementation uses SB3 SAC implementation as base. :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...) :param env: The environment to learn from (if registered in Gym, can be str) :param learning_rate: learning rate for adam optimizer, the same learning rate will be used for all networks (Q-Values, Actor and Value function) it can be a function of the current progress remaining (from 1 to 0) :param buffer_size: size of the replay buffer :param learning_starts: how many steps of the model to collect transitions for before learning starts :param batch_size: Minibatch size for each gradient update :param tau: the soft update coefficient ("Polyak update", between 0 and 1) :param gamma: the discount factor :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit like ``(5, "step")`` or ``(2, "episode")``. :param gradient_steps: How many gradient update after each step :param action_noise: the action noise type (None by default), this can help for hard exploration problem. Cf common.noise for the different action noise type. :param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``). If ``None``, it will be automatically selected. :param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation. :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer at a cost of more complexity. See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195 :param ent_coef: Entropy regularization coefficient. (Equivalent to inverse of reward scale in the original SAC paper.) Controlling exploration/exploitation trade-off. Set it to 'auto' to learn it automatically (and 'auto_0.1' for using 0.1 as initial value) :param target_update_interval: update the target network every ``target_network_update_freq`` gradient steps. :param target_entropy: target entropy when learning ``ent_coef`` (``ent_coef = 'auto'``) :param top_quantiles_to_drop_per_net: Number of quantiles to drop per network :param use_sde: Whether to use generalized State Dependent Exploration (gSDE) instead of action noise exploration (default: False) :param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE Default: -1 (only sample at the beginning of the rollout) :param use_sde_at_warmup: Whether to use gSDE instead of uniform sampling during the warm up phase (before learning starts) :param policy_kwargs: additional arguments to be passed to the policy on creation :param verbose: the verbosity level: 0 no output, 1 info, 2 debug :param seed: Seed for the pseudo random generators :param device: Device (cpu, cuda, ...) on which the code should be run. Setting it to auto, the code will be run on the GPU if possible. :param _init_setup_model: Whether or not to build the network at the creation of the instance """ policy_aliases: Dict[str, Type[BasePolicy]] = { "MlpPolicy": MlpPolicy,
"CnnPolicy": CnnPolicy,
8
2023-10-28 01:09:21+00:00
16k
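A short usage sketch tying the TQC docstring above to its constructor. `sb3_contrib` and a registered Gym environment are assumed to be available, and the environment id and hyperparameter values are illustrative only:

from sb3_contrib import TQC

model = TQC(
    "MlpPolicy",                       # resolved via the policy_aliases mapping
    "Pendulum-v1",                     # assumed Gym environment id
    learning_rate=3e-4,
    buffer_size=100_000,
    top_quantiles_to_drop_per_net=2,   # TQC-specific truncation knob from the docstring
    verbose=1,
)
model.learn(total_timesteps=5_000)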
zyang1580/CoLLM
minigpt4/runners/runner_base_rec.py
[ { "identifier": "download_cached_file", "path": "minigpt4/common/dist_utils.py", "snippet": "def download_cached_file(url, check_hash=True, progress=False):\n \"\"\"\n Download a file from a URL and cache it locally. If the file already exists, it is not downloaded again.\n If distributed, only the main process downloads the file, and the other processes wait for the file to be downloaded.\n \"\"\"\n\n def get_cached_file_path():\n # a hack to sync the file path across processes\n parts = torch.hub.urlparse(url)\n filename = os.path.basename(parts.path)\n cached_file = os.path.join(timm_hub.get_cache_dir(), filename)\n\n return cached_file\n\n if is_main_process():\n timm_hub.download_cached_file(url, check_hash, progress)\n\n if is_dist_avail_and_initialized():\n dist.barrier()\n\n return get_cached_file_path()" }, { "identifier": "get_rank", "path": "minigpt4/common/dist_utils.py", "snippet": "def get_rank():\n if not is_dist_avail_and_initialized():\n return 0\n return dist.get_rank()" }, { "identifier": "get_world_size", "path": "minigpt4/common/dist_utils.py", "snippet": "def get_world_size():\n if not is_dist_avail_and_initialized():\n return 1\n return dist.get_world_size()" }, { "identifier": "is_main_process", "path": "minigpt4/common/dist_utils.py", "snippet": "def is_main_process():\n return get_rank() == 0" }, { "identifier": "main_process", "path": "minigpt4/common/dist_utils.py", "snippet": "def main_process(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n rank, _ = get_dist_info()\n if rank == 0:\n return func(*args, **kwargs)\n\n return wrapper" }, { "identifier": "registry", "path": "minigpt4/common/registry.py", "snippet": "class Registry:\n def register_builder(cls, name):\n def wrap(builder_cls):\n def register_task(cls, name):\n def wrap(task_cls):\n def register_model(cls, name):\n def wrap(model_cls):\n def register_processor(cls, name):\n def wrap(processor_cls):\n def register_lr_scheduler(cls, name):\n def wrap(lr_sched_cls):\n def register_runner(cls, name):\n def wrap(runner_cls):\n def register_path(cls, name, path):\n def register(cls, name, obj):\n def get_builder_class(cls, name):\n def get_model_class(cls, name):\n def get_task_class(cls, name):\n def get_processor_class(cls, name):\n def get_lr_scheduler_class(cls, name):\n def get_runner_class(cls, name):\n def list_runners(cls):\n def list_models(cls):\n def list_tasks(cls):\n def list_processors(cls):\n def list_lr_schedulers(cls):\n def list_datasets(cls):\n def get_path(cls, name):\n def get(cls, name, default=None, no_warning=False):\n def unregister(cls, name):" }, { "identifier": "is_url", "path": "minigpt4/common/utils.py", "snippet": "def is_url(url_or_filename):\n parsed = urlparse(url_or_filename)\n return parsed.scheme in (\"http\", \"https\")" }, { "identifier": "concat_datasets", "path": "minigpt4/datasets/data_utils.py", "snippet": "def concat_datasets(datasets):\n \"\"\"\n Concatenates multiple datasets into a single dataset.\n\n It supports may-style datasets and DataPipeline from WebDataset. Currently, does not support\n generic IterableDataset because it requires creating separate samplers.\n\n Now only supports conctenating training datasets and assuming validation and testing\n have only a single dataset. 
This is because metrics should not be computed on the concatenated\n datasets.\n\n Args:\n datasets: dict of torch.utils.data.Dataset objects by split.\n\n Returns:\n Dict of concatenated datasets by split, \"train\" is the concatenation of multiple datasets,\n \"val\" and \"test\" remain the same.\n\n If the input training datasets contain both map-style and DataPipeline datasets, returns\n a tuple, where the first element is a concatenated map-style dataset and the second\n element is a chained DataPipeline dataset.\n\n \"\"\"\n # concatenate datasets in the same split\n for split_name in datasets:\n if split_name != \"train\":\n assert (\n len(datasets[split_name]) == 1\n ), \"Do not support multiple {} datasets.\".format(split_name)\n datasets[split_name] = datasets[split_name][0]\n else:\n iterable_datasets, map_datasets = [], []\n for dataset in datasets[split_name]:\n if isinstance(dataset, wds.DataPipeline):\n logging.info(\n \"Dataset {} is IterableDataset, can't be concatenated.\".format(\n dataset\n )\n )\n iterable_datasets.append(dataset)\n elif isinstance(dataset, IterableDataset):\n raise NotImplementedError(\n \"Do not support concatenation of generic IterableDataset.\"\n )\n else:\n map_datasets.append(dataset)\n\n # if len(iterable_datasets) > 0:\n # concatenate map-style datasets and iterable-style datasets separately\n if len(iterable_datasets) > 1:\n chained_datasets = (\n ChainDataset(iterable_datasets)\n )\n elif len(iterable_datasets) == 1:\n chained_datasets = iterable_datasets[0]\n else:\n chained_datasets = None\n\n concat_datasets = (\n ConcatDataset(map_datasets) if len(map_datasets) > 0 else None\n )\n\n train_datasets = concat_datasets, chained_datasets\n train_datasets = tuple([x for x in train_datasets if x is not None])\n train_datasets = (\n train_datasets[0] if len(train_datasets) == 1 else train_datasets\n )\n\n datasets[split_name] = train_datasets\n\n return datasets" }, { "identifier": "reorg_datasets_by_split", "path": "minigpt4/datasets/data_utils.py", "snippet": "def reorg_datasets_by_split(datasets):\n \"\"\"\n Organizes datasets by split.\n\n Args:\n datasets: dict of torch.utils.data.Dataset objects by name.\n\n Returns:\n Dict of datasets by split {split_name: List[Datasets]}.\n \"\"\"\n # if len(datasets) == 1:\n # return datasets[list(datasets.keys())[0]]\n # else:\n reorg_datasets = dict()\n\n # reorganize by split\n for _, dataset in datasets.items():\n for split_name, dataset_split in dataset.items():\n if split_name not in reorg_datasets:\n reorg_datasets[split_name] = [dataset_split]\n else:\n reorg_datasets[split_name].append(dataset_split)\n\n return reorg_datasets" }, { "identifier": "ChainDataset", "path": "minigpt4/datasets/data_utils.py", "snippet": "class ChainDataset(wds.DataPipeline):\n r\"\"\"Dataset for chaining multiple :class:`DataPipeline` s.\n\n This class is useful to assemble different existing dataset streams. 
The\n chaining operation is done on-the-fly, so concatenating large-scale\n datasets with this class will be efficient.\n\n Args:\n datasets (iterable of IterableDataset): datasets to be chained together\n \"\"\"\n def __init__(self, datasets: List[wds.DataPipeline]) -> None:\n super().__init__()\n self.datasets = datasets\n self.prob = []\n self.names = []\n for dataset in self.datasets:\n if hasattr(dataset, 'name'):\n self.names.append(dataset.name)\n else:\n self.names.append('Unknown')\n if hasattr(dataset, 'sample_ratio'):\n self.prob.append(dataset.sample_ratio)\n else:\n self.prob.append(1)\n logging.info(\"One of the datapipeline doesn't define ratio and set to 1 automatically.\")\n\n def __iter__(self):\n datastreams = [iter(dataset) for dataset in self.datasets]\n while True:\n select_datastream = random.choices(datastreams, weights=self.prob, k=1)[0]\n yield next(select_datastream)" }, { "identifier": "IterLoader", "path": "minigpt4/datasets/datasets/dataloader_utils.py", "snippet": "class IterLoader:\n \"\"\"\n A wrapper to convert DataLoader as an infinite iterator.\n\n Modified from:\n https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/iter_based_runner.py\n \"\"\"\n\n def __init__(self, dataloader: DataLoader, use_distributed: bool = False):\n self._dataloader = dataloader\n self.iter_loader = iter(self._dataloader)\n self._use_distributed = use_distributed\n self._epoch = 0\n\n @property\n def epoch(self) -> int:\n return self._epoch\n\n def __next__(self):\n try:\n data = next(self.iter_loader)\n except StopIteration:\n self._epoch += 1\n if hasattr(self._dataloader.sampler, \"set_epoch\") and self._use_distributed:\n self._dataloader.sampler.set_epoch(self._epoch)\n time.sleep(2) # Prevent possible deadlock during epoch transition\n self.iter_loader = iter(self._dataloader)\n data = next(self.iter_loader)\n\n return data\n\n def __iter__(self):\n return self\n\n def __len__(self):\n return len(self._dataloader)" }, { "identifier": "MultiIterLoader", "path": "minigpt4/datasets/datasets/dataloader_utils.py", "snippet": "class MultiIterLoader:\n \"\"\"\n A simple wrapper for iterating over multiple iterators.\n\n Args:\n loaders (List[Loader]): List of Iterator loaders.\n ratios (List[float]): List of ratios to sample from each loader. 
If None, all loaders are sampled uniformly.\n \"\"\"\n\n def __init__(self, loaders, ratios=None):\n # assert all loaders has __next__ method\n # self.nums = []\n for loader in loaders:\n assert hasattr(\n loader, \"__next__\"\n ), \"Loader {} has no __next__ method.\".format(loader)\n #self.nums.extend(len(loader))\n\n if ratios is None:\n ratios = [1.0] * len(loaders)\n else:\n assert len(ratios) == len(loaders)\n ratios = [float(ratio) / sum(ratios) for ratio in ratios]\n\n self.loaders = loaders\n self.ratios = ratios\n\n def __next__(self):\n # random sample from each loader by ratio\n loader_idx = random.choices(range(len(self.loaders)), self.ratios, k=1)[0]\n return next(self.loaders[loader_idx])\n \n # def __len__(self):\n # return len(self.loaders)\n \n # def __iter__(self):\n # # for loader in self.loaders:\n # # yield loader\n # return self" }, { "identifier": "PrefetchLoader", "path": "minigpt4/datasets/datasets/dataloader_utils.py", "snippet": "class PrefetchLoader(object):\n \"\"\"\n Modified from https://github.com/ChenRocks/UNITER.\n\n overlap compute and cuda data transfer\n (copied and then modified from nvidia apex)\n \"\"\"\n\n def __init__(self, loader):\n self.loader = loader\n self.stream = torch.cuda.Stream()\n\n def __iter__(self):\n loader_it = iter(self.loader)\n self.preload(loader_it)\n batch = self.next(loader_it)\n while batch is not None:\n is_tuple = isinstance(batch, tuple)\n if is_tuple:\n task, batch = batch\n\n if is_tuple:\n yield task, batch\n else:\n yield batch\n batch = self.next(loader_it)\n\n def __len__(self):\n return len(self.loader)\n\n def preload(self, it):\n try:\n self.batch = next(it)\n except StopIteration:\n self.batch = None\n return\n # if record_stream() doesn't work, another option is to make sure\n # device inputs are created on the main stream.\n # self.next_input_gpu = torch.empty_like(self.next_input,\n # device='cuda')\n # self.next_target_gpu = torch.empty_like(self.next_target,\n # device='cuda')\n # Need to make sure the memory allocated for next_* is not still in use\n # by the main stream at the time we start copying to next_*:\n # self.stream.wait_stream(torch.cuda.current_stream())\n with torch.cuda.stream(self.stream):\n self.batch = move_to_cuda(self.batch)\n # more code for the alternative if record_stream() doesn't work:\n # copy_ will record the use of the pinned source tensor in this\n # side stream.\n # self.next_input_gpu.copy_(self.next_input, non_blocking=True)\n # self.next_target_gpu.copy_(self.next_target, non_blocking=True)\n # self.next_input = self.next_input_gpu\n # self.next_target = self.next_target_gpu\n\n def next(self, it):\n torch.cuda.current_stream().wait_stream(self.stream)\n batch = self.batch\n if batch is not None:\n record_cuda_stream(batch)\n self.preload(it)\n return batch\n \n def __next__(self):\n pass\n\n def __getattr__(self, name):\n method = self.loader.__getattribute__(name)\n return method" }, { "identifier": "RunnerBase", "path": "minigpt4/runners/runner_base.py", "snippet": "class RunnerBase:\n \"\"\"\n A runner class to train and evaluate a model given a task and datasets.\n\n The runner uses pytorch distributed data parallel by default. 
Future release\n will support other distributed frameworks.\n \"\"\"\n\n def __init__(self, cfg, task, model, datasets, job_id):\n self.config = cfg\n self.job_id = job_id\n\n self.task = task\n self.datasets = datasets\n\n self._model = model\n\n self._wrapped_model = None\n self._device = None\n self._optimizer = None\n self._scaler = None\n self._dataloaders = None\n self._lr_sched = None\n\n self.start_epoch = 0\n\n # self.setup_seeds()\n self.setup_output_dir()\n\n @property\n def device(self):\n if self._device is None:\n self._device = torch.device(self.config.run_cfg.device)\n\n return self._device\n\n @property\n def use_distributed(self):\n return self.config.run_cfg.distributed\n\n @property\n def model(self):\n \"\"\"\n A property to get the DDP-wrapped model on the device.\n \"\"\"\n # move model to device\n if self._model.device != self.device:\n self._model = self._model.to(self.device)\n\n # distributed training wrapper\n if self.use_distributed:\n if self._wrapped_model is None:\n self._wrapped_model = DDP(\n self._model, device_ids=[self.config.run_cfg.gpu], find_unused_parameters=True\n )\n else:\n self._wrapped_model = self._model\n\n return self._wrapped_model\n\n @property\n def optimizer(self):\n # TODO make optimizer class and configurations\n if self._optimizer is None:\n num_parameters = 0\n p_wd, p_non_wd = [], []\n for n, p in self.model.named_parameters():\n if not p.requires_grad:\n continue # frozen weights\n print(n)\n if p.ndim < 2 or \"bias\" in n or \"ln\" in n or \"bn\" in n:\n p_non_wd.append(p)\n else:\n p_wd.append(p)\n num_parameters += p.data.nelement()\n logging.info(\"number of trainable parameters: %d\" % num_parameters)\n self._num_trainable_para = num_parameters > 0\n optim_params = [\n {\n \"params\": p_wd,\n \"weight_decay\": float(self.config.run_cfg.weight_decay),\n },\n {\"params\": p_non_wd, \"weight_decay\": 0},\n ]\n beta2 = self.config.run_cfg.get(\"beta2\", 0.999)\n self._optimizer = torch.optim.AdamW(\n optim_params,\n lr=float(self.config.run_cfg.init_lr),\n weight_decay=float(self.config.run_cfg.weight_decay),\n betas=(0.9, beta2),\n )\n\n return self._optimizer\n\n @property\n def scaler(self):\n amp = self.config.run_cfg.get(\"amp\", False)\n\n if amp:\n if self._scaler is None:\n self._scaler = torch.cuda.amp.GradScaler()\n\n return self._scaler\n\n @property\n def lr_scheduler(self):\n \"\"\"\n A property to get and create learning rate scheduler by split just in need.\n \"\"\"\n if self._lr_sched is None:\n lr_sched_cls = registry.get_lr_scheduler_class(self.config.run_cfg.lr_sched)\n\n # max_epoch = self.config.run_cfg.max_epoch\n max_epoch = self.max_epoch\n # min_lr = self.config.run_cfg.min_lr\n min_lr = self.min_lr\n # init_lr = self.config.run_cfg.init_lr\n init_lr = self.init_lr\n\n # optional parameters\n decay_rate = self.config.run_cfg.get(\"lr_decay_rate\", None)\n warmup_start_lr = self.config.run_cfg.get(\"warmup_lr\", -1)\n warmup_steps = self.config.run_cfg.get(\"warmup_steps\", 0)\n iters_per_epoch = self.config.run_cfg.get(\"iters_per_epoch\", None)\n\n if iters_per_epoch is None:\n try:\n iters_per_epoch = len(self.dataloaders['train'])\n except (AttributeError, TypeError):\n iters_per_epoch = 10000\n\n self._lr_sched = lr_sched_cls(\n optimizer=self.optimizer,\n max_epoch=max_epoch,\n iters_per_epoch=iters_per_epoch,\n min_lr=min_lr,\n init_lr=init_lr,\n decay_rate=decay_rate,\n warmup_start_lr=warmup_start_lr,\n warmup_steps=warmup_steps,\n )\n\n return self._lr_sched\n\n @property\n def dataloaders(self) 
-> dict:\n \"\"\"\n A property to get and create dataloaders by split just in need.\n\n If no train_dataset_ratio is provided, concatenate map-style datasets and\n chain wds.DataPipe datasets separately. Training set becomes a tuple\n (ConcatDataset, ChainDataset), both are optional but at least one of them is\n required. The resultant ConcatDataset and ChainDataset will be sampled evenly.\n\n If train_dataset_ratio is provided, create a MultiIterLoader to sample\n each dataset by ratios during training.\n\n Currently do not support multiple datasets for validation and test.\n\n Returns:\n dict: {split_name: (tuples of) dataloader}\n \"\"\"\n if self._dataloaders is None:\n\n # concatenate map-style datasets and chain wds.DataPipe datasets separately\n # training set becomes a tuple (ConcatDataset, ChainDataset), both are\n # optional but at least one of them is required. The resultant ConcatDataset\n # and ChainDataset will be sampled evenly.\n logging.info(\n \"dataset_ratios not specified, datasets will be concatenated (map-style datasets) or chained (webdataset.DataPipeline).\"\n )\n\n datasets = reorg_datasets_by_split(self.datasets)\n self.datasets = datasets\n # self.datasets = concat_datasets(datasets)\n\n # print dataset statistics after concatenation/chaining\n for split_name in self.datasets:\n if isinstance(self.datasets[split_name], tuple) or isinstance(\n self.datasets[split_name], list\n ):\n # mixed wds.DataPipeline and torch.utils.data.Dataset\n num_records = sum(\n [\n len(d)\n if not type(d) in [wds.DataPipeline, ChainDataset]\n else 0\n for d in self.datasets[split_name]\n ]\n )\n\n else:\n if hasattr(self.datasets[split_name], \"__len__\"):\n # a single map-style dataset\n num_records = len(self.datasets[split_name])\n else:\n # a single wds.DataPipeline\n num_records = -1\n logging.info(\n \"Only a single wds.DataPipeline dataset, no __len__ attribute.\"\n )\n\n if num_records >= 0:\n logging.info(\n \"Loaded {} records for {} split from the dataset.\".format(\n num_records, split_name\n )\n )\n\n # create dataloaders\n split_names = sorted(self.datasets.keys())\n\n datasets = [self.datasets[split] for split in split_names]\n is_trains = [split in self.train_splits for split in split_names]\n\n batch_sizes = [\n self.config.run_cfg.batch_size_train\n if split == \"train\"\n else self.config.run_cfg.batch_size_eval\n for split in split_names\n ]\n\n collate_fns = []\n for dataset in datasets:\n if isinstance(dataset, tuple) or isinstance(dataset, list):\n collate_fns.append([getattr(d, \"collater\", None) for d in dataset])\n else:\n collate_fns.append(getattr(dataset, \"collater\", None))\n\n dataloaders = self.create_loaders(\n datasets=datasets,\n num_workers=self.config.run_cfg.num_workers,\n batch_sizes=batch_sizes,\n is_trains=is_trains,\n collate_fns=collate_fns,\n )\n\n self._dataloaders = {k: v for k, v in zip(split_names, dataloaders)}\n\n return self._dataloaders\n\n @property\n def cuda_enabled(self):\n return self.device.type == \"cuda\"\n\n @property\n def max_epoch(self):\n return int(self.config.run_cfg.max_epoch)\n\n @property\n def log_freq(self):\n log_freq = self.config.run_cfg.get(\"log_freq\", 50)\n return int(log_freq)\n\n @property\n def init_lr(self):\n return float(self.config.run_cfg.init_lr)\n\n @property\n def min_lr(self):\n return float(self.config.run_cfg.min_lr)\n\n @property\n def accum_grad_iters(self):\n return int(self.config.run_cfg.get(\"accum_grad_iters\", 1))\n\n @property\n def valid_splits(self):\n valid_splits = 
self.config.run_cfg.get(\"valid_splits\", [])\n\n if len(valid_splits) == 0:\n logging.info(\"No validation splits found.\")\n\n return valid_splits\n\n @property\n def test_splits(self):\n test_splits = self.config.run_cfg.get(\"test_splits\", [])\n\n return test_splits\n\n @property\n def train_splits(self):\n train_splits = self.config.run_cfg.get(\"train_splits\", [])\n\n if len(train_splits) == 0:\n logging.info(\"Empty train splits.\")\n\n return train_splits\n\n @property\n def evaluate_only(self):\n \"\"\"\n Set to True to skip training.\n \"\"\"\n return self.config.run_cfg.evaluate\n\n @property\n def use_dist_eval_sampler(self):\n return self.config.run_cfg.get(\"use_dist_eval_sampler\", True)\n\n @property\n def resume_ckpt_path(self):\n return self.config.run_cfg.get(\"resume_ckpt_path\", None)\n\n @property\n def train_loader(self):\n train_dataloader = self.dataloaders[\"train\"]\n\n return train_dataloader\n\n def setup_output_dir(self):\n lib_root = Path(registry.get_path(\"library_root\"))\n\n output_dir = lib_root / self.config.run_cfg.output_dir / self.job_id\n result_dir = output_dir / \"result\"\n\n output_dir.mkdir(parents=True, exist_ok=True)\n result_dir.mkdir(parents=True, exist_ok=True)\n\n registry.register_path(\"result_dir\", str(result_dir))\n registry.register_path(\"output_dir\", str(output_dir))\n\n self.result_dir = result_dir\n self.output_dir = output_dir\n \n def model_to_betrained(self):\n if self.use_distributed:\n return self.model.module.to_be_trained()\n else:\n return self.model.to_be_trained()\n\n def train(self):\n start_time = time.time()\n best_agg_metric = -100000\n best_epoch = 0\n not_change = 0\n self.set_model_mode(self.config.run_cfg.mode)\n \n\n self.log_config()\n stop_training_flag = False\n # resume from checkpoint if specified\n if not self.evaluate_only and self.resume_ckpt_path is not None:\n self._load_checkpoint(self.resume_ckpt_path)\n\n if not self.evaluate_only:# with training\n for cur_epoch in range(self.start_epoch, self.max_epoch):\n # training phase\n if not self.evaluate_only and self.model_to_betrained():\n logging.info(\"Start training\")\n # having lora or IDs are used\n train_stats = self.train_epoch(cur_epoch)\n self.log_stats(split_name=\"train\", stats=train_stats)\n # torch.cuda.empty_cache()\n \n \n # evaluation phase\n if len(self.valid_splits) > 0:\n for split_name in self.valid_splits:\n logging.info(\"Evaluating on {}.\".format(split_name))\n\n val_log = self.eval_epoch(\n split_name=split_name, cur_epoch=cur_epoch\n )\n # torch.cuda.empty_cache()\n \n if val_log is not None:\n if is_main_process():\n assert (\n \"agg_metrics\" in val_log\n ), \"No agg_metrics found in validation log.\"\n\n agg_metrics = val_log[\"agg_metrics\"]\n if agg_metrics > best_agg_metric and split_name == \"valid\":\n best_epoch, best_agg_metric = cur_epoch, agg_metrics\n\n self._save_checkpoint(cur_epoch, is_best=True)\n not_change = 0\n \n \n # logging.info(\"Evaluating on {}.\".format('test'))\n # test_log = self.eval_epoch(split_name='test', cur_epoch='best', skip_reload=True)\n # logging.info(\"testing result:\", test_log)\n\n val_log.update({\"best_epoch\": best_epoch})\n self.log_stats(val_log, split_name)\n not_change += 1\n # if not_change > 20: # early stop\n # break\n # torch.cuda.empty_cache()\n\n else:\n # if no validation split is provided, we just save the checkpoint at the end of each epoch.\n if not self.evaluate_only:\n self._save_checkpoint(cur_epoch, is_best=False)\n\n if self.evaluate_only:\n break\n\n if 
self.config.run_cfg.distributed:\n dist.barrier()\n if not self.model_to_betrained():\n break\n if not_change > 20:\n logging.info(\"Early stop. The results has not changed up to 20 epochs.\")\n break\n\n # testing phase, would only run when evaluate_only==True\n if self.evaluate_only:\n print(\"training finish or just evaluation...\")\n logging.info(\"Evaluating on {}.\".format(self.test_splits[0]))\n test_epoch = \"best\" if len(self.valid_splits) > 0 else cur_epoch\n self.evaluate(cur_epoch=test_epoch, skip_reload=self.evaluate_only)\n\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n logging.info(\"Training time {}\".format(total_time_str))\n self.set_model_mode(None) # recover to the default model\n\n def evaluate(self, cur_epoch=\"best\", skip_reload=False):\n test_logs = dict()\n\n if len(self.test_splits) > 0:\n for split_name in self.test_splits:\n test_logs[split_name] = self.eval_epoch(\n split_name=split_name, cur_epoch=cur_epoch, skip_reload=skip_reload\n )\n\n return test_logs\n\n def train_epoch(self, epoch):\n # train\n self.model.train()\n\n return self.task.train_epoch(\n epoch=epoch,\n model=self.model,\n data_loader=self.train_loader,\n optimizer=self.optimizer,\n scaler=self.scaler,\n lr_scheduler=self.lr_scheduler,\n cuda_enabled=self.cuda_enabled,\n log_freq=self.log_freq,\n accum_grad_iters=self.accum_grad_iters,\n )\n\n @torch.no_grad()\n def eval_epoch(self, split_name, cur_epoch, skip_reload=False):\n \"\"\"\n Evaluate the model on a given split.\n\n Args:\n split_name (str): name of the split to evaluate on.\n cur_epoch (int): current epoch.\n skip_reload_best (bool): whether to skip reloading the best checkpoint.\n During training, we will reload the best checkpoint for validation.\n During testing, we will use provided weights and skip reloading the best checkpoint .\n \"\"\"\n data_loader = self.dataloaders.get(split_name, None)\n assert data_loader, \"data_loader for split {} is None.\".format(split_name)\n\n # TODO In validation, you need to compute loss as well as metrics\n # TODO consider moving to model.before_evaluation()\n model = self.unwrap_dist_model(self.model)\n if not skip_reload and cur_epoch == \"best\":\n model = self._reload_best_model(model)\n model.eval()\n\n self.task.before_evaluation(\n model=model,\n dataset=self.datasets[split_name],\n )\n results = self.task.evaluation(model, data_loader)\n\n if results is not None:\n return self.task.after_evaluation(\n val_result=results,\n split_name=split_name,\n epoch=cur_epoch,\n )\n\n def unwrap_dist_model(self, model):\n if self.use_distributed:\n return model.module\n else:\n return model\n \n def set_model_mode(self,mode):\n if self.use_distributed:\n self.model.module.set_mode(mode)\n else:\n self.model.set_mode(mode)\n\n def create_loaders(\n self,\n datasets,\n num_workers,\n batch_sizes,\n is_trains,\n collate_fns,\n dataset_ratios=None,\n ):\n \"\"\"\n Create dataloaders for training and validation.\n \"\"\"\n\n def _create_loader(dataset, num_workers, bsz, is_train, collate_fn):\n # create a single dataloader for each split\n if isinstance(dataset, ChainDataset) or isinstance(\n dataset, wds.DataPipeline\n ):\n # wds.WebdDataset instance are chained together\n # webdataset.DataPipeline has its own sampler and collate_fn\n loader = iter(\n DataLoader(\n dataset,\n batch_size=bsz,\n num_workers=num_workers,\n pin_memory=True,\n )\n )\n else:\n # map-style dataset are concatenated together\n # setup distributed sampler\n if 
self.use_distributed:\n sampler = DistributedSampler(\n dataset,\n shuffle=is_train,\n num_replicas=get_world_size(),\n rank=get_rank(),\n )\n if not self.use_dist_eval_sampler:\n # e.g. retrieval evaluation\n sampler = sampler if is_train else None\n else:\n sampler = None\n\n loader = DataLoader(\n dataset,\n batch_size=bsz,\n num_workers=num_workers,\n pin_memory=True,\n sampler=sampler,\n shuffle=sampler is None and is_train,\n collate_fn=collate_fn,\n drop_last=True if is_train else False,\n )\n loader = PrefetchLoader(loader)\n\n if is_train:\n loader = IterLoader(loader, use_distributed=self.use_distributed)\n\n return loader\n\n loaders = []\n\n for dataset, bsz, is_train, collate_fn in zip(\n datasets, batch_sizes, is_trains, collate_fns\n ):\n if isinstance(dataset, list) or isinstance(dataset, tuple):\n if hasattr(dataset[0], 'sample_ratio') and dataset_ratios is None:\n dataset_ratios = [d.sample_ratio for d in dataset]\n loader = MultiIterLoader(\n loaders=[\n _create_loader(d, num_workers, bsz, is_train, collate_fn[i])\n for i, d in enumerate(dataset)\n ],\n ratios=dataset_ratios,\n )\n else:\n loader = _create_loader(dataset, num_workers, bsz, is_train, collate_fn)\n\n loaders.append(loader)\n\n return loaders\n\n @main_process\n def _save_checkpoint(self, cur_epoch, is_best=False):\n \"\"\"\n Save the checkpoint at the current epoch.\n \"\"\"\n model_no_ddp = self.unwrap_dist_model(self.model)\n param_grad_dic = {\n k: v.requires_grad for (k, v) in model_no_ddp.named_parameters()\n }\n state_dict = model_no_ddp.state_dict()\n for k in list(state_dict.keys()):\n if k in param_grad_dic.keys() and not param_grad_dic[k]:\n # delete parameters that do not require gradient\n del state_dict[k]\n save_obj = {\n \"model\": state_dict,\n \"optimizer\": self.optimizer.state_dict(),\n \"config\": self.config.to_dict(),\n \"scaler\": self.scaler.state_dict() if self.scaler else None,\n \"epoch\": cur_epoch,\n }\n save_to = os.path.join(\n self.output_dir,\n \"checkpoint_{}.pth\".format(\"best\" if is_best else cur_epoch),\n )\n logging.info(\"Saving checkpoint at epoch {} to {}.\".format(cur_epoch, save_to))\n torch.save(save_obj, save_to)\n\n def _reload_best_model(self, model):\n \"\"\"\n Load the best checkpoint for evaluation.\n \"\"\"\n checkpoint_path = os.path.join(self.output_dir, \"checkpoint_best.pth\")\n\n logging.info(\"Loading checkpoint from {}.\".format(checkpoint_path))\n checkpoint = torch.load(checkpoint_path, map_location=\"cpu\")\n try:\n model.load_state_dict(checkpoint[\"model\"])\n except RuntimeError as e:\n logging.warning(\n \"\"\"\n Key mismatch when loading checkpoint. 
This is expected if only part of the model is saved.\n Trying to load the model with strict=False.\n \"\"\"\n )\n model.load_state_dict(checkpoint[\"model\"], strict=False)\n return model\n\n def _load_checkpoint(self, url_or_filename):\n \"\"\"\n Resume from a checkpoint.\n \"\"\"\n if is_url(url_or_filename):\n cached_file = download_cached_file(\n url_or_filename, check_hash=False, progress=True\n )\n checkpoint = torch.load(cached_file, map_location=self.device)\n elif os.path.isfile(url_or_filename):\n checkpoint = torch.load(url_or_filename, map_location=self.device)\n else:\n raise RuntimeError(\"checkpoint url or path is invalid\")\n\n state_dict = checkpoint[\"model\"]\n self.unwrap_dist_model(self.model).load_state_dict(state_dict,strict=False)\n\n self.optimizer.load_state_dict(checkpoint[\"optimizer\"])\n if self.scaler and \"scaler\" in checkpoint:\n self.scaler.load_state_dict(checkpoint[\"scaler\"])\n\n self.start_epoch = checkpoint[\"epoch\"] + 1\n logging.info(\"Resume checkpoint from {}\".format(url_or_filename))\n\n @main_process\n def log_stats(self, stats, split_name):\n if isinstance(stats, dict):\n log_stats = {**{f\"{split_name}_{k}\": v for k, v in stats.items()}}\n with open(os.path.join(self.output_dir, \"log.txt\"), \"a\") as f:\n f.write(json.dumps(log_stats) + \"\\n\")\n elif isinstance(stats, list):\n pass\n\n @main_process\n def log_config(self):\n with open(os.path.join(self.output_dir, \"log.txt\"), \"a\") as f:\n f.write(json.dumps(self.config.to_dict(), indent=4) + \"\\n\")" } ]
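The `IterLoader` snippet above describes wrapping a finite DataLoader into an infinite iterator that restarts itself and counts epochs. A small sketch, assuming PyTorch and the CoLLM repo are importable; the dataset is a made-up stand-in:

import torch
from torch.utils.data import DataLoader, TensorDataset

from minigpt4.datasets.datasets.dataloader_utils import IterLoader

ds = TensorDataset(torch.arange(10).float())   # 10 samples -> 3 batches of size 4
loader = IterLoader(DataLoader(ds, batch_size=4), use_distributed=False)

for _ in range(5):        # more steps than one pass over the data
    batch = next(loader)  # never raises StopIteration; the wrapper restarts the loader
print(loader.epoch)       # 1: exactly one rollover happened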
import datetime
import json
import logging
import os
import time
import torch
import torch.distributed as dist
import webdataset as wds
from pathlib import Path
from minigpt4.common.dist_utils import (
    download_cached_file,
    get_rank,
    get_world_size,
    is_main_process,
    main_process,
)
from minigpt4.common.registry import registry
from minigpt4.common.utils import is_url
from minigpt4.datasets.data_utils import concat_datasets, reorg_datasets_by_split, ChainDataset
from minigpt4.datasets.datasets.dataloader_utils import (
    IterLoader,
    MultiIterLoader,
    PrefetchLoader,
)
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, DistributedSampler
from minigpt4.runners.runner_base import RunnerBase
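Among the imports above, `MultiIterLoader` is the piece that mixes several training loaders by sampling ratio. A hedged sketch of that behaviour; plain iterators stand in for real dataloaders and nothing below is taken from the record's own code:

import itertools

from minigpt4.datasets.datasets.dataloader_utils import MultiIterLoader

loader_a = itertools.cycle(["batch_from_A"])   # any object with __next__ is accepted
loader_b = itertools.cycle(["batch_from_B"])

multi = MultiIterLoader([loader_a, loader_b], ratios=[3, 1])   # ~75% / ~25% sampling
print([next(multi) for _ in range(8)])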
11142
# shuffle=sampler is None and is_train, # collate_fn=collate_fn, # drop_last=True if is_train else False, # ) # loader = PrefetchLoader(loader) # if is_train: # loader = IterLoader(loader, use_distributed=self.use_distributed) # return loader # loaders = [] # for dataset, bsz, is_train, collate_fn in zip( # datasets, batch_sizes, is_trains, collate_fns # ): # if isinstance(dataset, list) or isinstance(dataset, tuple): # if hasattr(dataset[0], 'sample_ratio') and dataset_ratios is None: # dataset_ratios = [d.sample_ratio for d in dataset] # loader = MultiIterLoader( # loaders=[ # _create_loader(d, num_workers, bsz, is_train, collate_fn[i]) # for i, d in enumerate(dataset) # ], # ratios=dataset_ratios, # ) # else: # loader = _create_loader(dataset, num_workers, bsz, is_train, collate_fn) # loaders.append(loader) # return loaders # @main_process # def _save_checkpoint(self, cur_epoch, is_best=False): # """ # Save the checkpoint at the current epoch. # """ # model_no_ddp = self.unwrap_dist_model(self.model) # param_grad_dic = { # k: v.requires_grad for (k, v) in model_no_ddp.named_parameters() # } # state_dict = model_no_ddp.state_dict() # for k in list(state_dict.keys()): # if k in param_grad_dic.keys() and not param_grad_dic[k]: # # delete parameters that do not require gradient # del state_dict[k] # save_obj = { # "model": state_dict, # "optimizer": self.optimizer.state_dict(), # "config": self.config.to_dict(), # "scaler": self.scaler.state_dict() if self.scaler else None, # "epoch": cur_epoch, # } # save_to = os.path.join( # self.output_dir, # "checkpoint_{}.pth".format("best" if is_best else cur_epoch), # ) # logging.info("Saving checkpoint at epoch {} to {}.".format(cur_epoch, save_to)) # torch.save(save_obj, save_to) # def _reload_best_model(self, model): # """ # Load the best checkpoint for evaluation. # """ # checkpoint_path = os.path.join(self.output_dir, "checkpoint_best.pth") # logging.info("Loading checkpoint from {}.".format(checkpoint_path)) # checkpoint = torch.load(checkpoint_path, map_location="cpu") # try: # model.load_state_dict(checkpoint["model"]) # except RuntimeError as e: # logging.warning( # """ # Key mismatch when loading checkpoint. This is expected if only part of the model is saved. # Trying to load the model with strict=False. # """ # ) # model.load_state_dict(checkpoint["model"], strict=False) # return model # def _load_checkpoint(self, url_or_filename): # """ # Resume from a checkpoint. 
# """ # if is_url(url_or_filename): # cached_file = download_cached_file( # url_or_filename, check_hash=False, progress=True # ) # checkpoint = torch.load(cached_file, map_location=self.device) # elif os.path.isfile(url_or_filename): # checkpoint = torch.load(url_or_filename, map_location=self.device) # else: # raise RuntimeError("checkpoint url or path is invalid") # state_dict = checkpoint["model"] # self.unwrap_dist_model(self.model).load_state_dict(state_dict,strict=False) # self.optimizer.load_state_dict(checkpoint["optimizer"]) # if self.scaler and "scaler" in checkpoint: # self.scaler.load_state_dict(checkpoint["scaler"]) # self.start_epoch = checkpoint["epoch"] + 1 # logging.info("Resume checkpoint from {}".format(url_or_filename)) # @main_process # def log_stats(self, stats, split_name): # if isinstance(stats, dict): # log_stats = {**{f"{split_name}_{k}": v for k, v in stats.items()}} # with open(os.path.join(self.output_dir, "log.txt"), "a") as f: # f.write(json.dumps(log_stats) + "\n") # elif isinstance(stats, list): # pass # @main_process # def log_config(self): # with open(os.path.join(self.output_dir, "log.txt"), "a") as f: # f.write(json.dumps(self.config.to_dict(), indent=4) + "\n") @registry.register_runner("rec_runner_base")
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ # @registry.register_runner("rec_runner_base") # class RecRunnerBase: # """ # A runner class to train and evaluate a model given a task and datasets. # The runner uses pytorch distributed data parallel by default. Future release # will support other distributed frameworks. # """ # def __init__(self, cfg, task, model, datasets, job_id): # self.config = cfg # self.job_id = job_id # self.task = task # self.datasets = datasets # self._model = model # self._wrapped_model = None # self._device = None # self._optimizer = None # self._scaler = None # self._dataloaders = None # self._lr_sched = None # self.start_epoch = 0 # # self.setup_seeds() # self.setup_output_dir() # @property # def device(self): # if self._device is None: # self._device = torch.device(self.config.run_cfg.device) # return self._device # @property # def use_distributed(self): # return self.config.run_cfg.distributed # @property # def model(self): # """ # A property to get the DDP-wrapped model on the device. # """ # # move model to device # if self._model.device != self.device: # self._model = self._model.to(self.device) # # distributed training wrapper # if self.use_distributed: # if self._wrapped_model is None: # self._wrapped_model = DDP( # self._model, device_ids=[self.config.run_cfg.gpu] # ) # else: # self._wrapped_model = self._model # return self._wrapped_model # @property # def optimizer(self): # # TODO make optimizer class and configurations # if self._optimizer is None: # num_parameters = 0 # p_wd, p_non_wd = [], [] # for n, p in self.model.named_parameters(): # if not p.requires_grad: # continue # frozen weights # print(n) # if p.ndim < 2 or "bias" in n or "ln" in n or "bn" in n: # p_non_wd.append(p) # else: # p_wd.append(p) # num_parameters += p.data.nelement() # logging.info("number of trainable parameters: %d" % num_parameters) # optim_params = [ # { # "params": p_wd, # "weight_decay": float(self.config.run_cfg.weight_decay), # }, # {"params": p_non_wd, "weight_decay": 0}, # ] # beta2 = self.config.run_cfg.get("beta2", 0.999) # self._optimizer = torch.optim.AdamW( # optim_params, # lr=float(self.config.run_cfg.init_lr), # weight_decay=float(self.config.run_cfg.weight_decay), # betas=(0.9, beta2), # ) # return self._optimizer # @property # def scaler(self): # amp = self.config.run_cfg.get("amp", False) # if amp: # if self._scaler is None: # self._scaler = torch.cuda.amp.GradScaler() # return self._scaler # @property # def lr_scheduler(self): # """ # A property to get and create learning rate scheduler by split just in need. 
# """ # if self._lr_sched is None: # lr_sched_cls = registry.get_lr_scheduler_class(self.config.run_cfg.lr_sched) # # max_epoch = self.config.run_cfg.max_epoch # max_epoch = self.max_epoch # # min_lr = self.config.run_cfg.min_lr # min_lr = self.min_lr # # init_lr = self.config.run_cfg.init_lr # init_lr = self.init_lr # # optional parameters # decay_rate = self.config.run_cfg.get("lr_decay_rate", None) # warmup_start_lr = self.config.run_cfg.get("warmup_lr", -1) # warmup_steps = self.config.run_cfg.get("warmup_steps", 0) # iters_per_epoch = self.config.run_cfg.get("iters_per_epoch", None) # if iters_per_epoch is None: # try: # iters_per_epoch = len(self.dataloaders['train']) # except (AttributeError, TypeError): # iters_per_epoch = 10000 # self._lr_sched = lr_sched_cls( # optimizer=self.optimizer, # max_epoch=max_epoch, # iters_per_epoch=iters_per_epoch, # min_lr=min_lr, # init_lr=init_lr, # decay_rate=decay_rate, # warmup_start_lr=warmup_start_lr, # warmup_steps=warmup_steps, # ) # return self._lr_sched # @property # def dataloaders(self) -> dict: # """ # A property to get and create dataloaders by split just in need. # If no train_dataset_ratio is provided, concatenate map-style datasets and # chain wds.DataPipe datasets separately. Training set becomes a tuple # (ConcatDataset, ChainDataset), both are optional but at least one of them is # required. The resultant ConcatDataset and ChainDataset will be sampled evenly. # If train_dataset_ratio is provided, create a MultiIterLoader to sample # each dataset by ratios during training. # Currently do not support multiple datasets for validation and test. # Returns: # dict: {split_name: (tuples of) dataloader} # """ # if self._dataloaders is None: # # concatenate map-style datasets and chain wds.DataPipe datasets separately # # training set becomes a tuple (ConcatDataset, ChainDataset), both are # # optional but at least one of them is required. The resultant ConcatDataset # # and ChainDataset will be sampled evenly. # logging.info( # "dataset_ratios not specified, datasets will be concatenated (map-style datasets) or chained (webdataset.DataPipeline)." # ) # datasets = reorg_datasets_by_split(self.datasets) # self.datasets = datasets # # self.datasets = concat_datasets(datasets) # # print dataset statistics after concatenation/chaining # for split_name in self.datasets: # if isinstance(self.datasets[split_name], tuple) or isinstance( # self.datasets[split_name], list # ): # # mixed wds.DataPipeline and torch.utils.data.Dataset # num_records = sum( # [ # len(d) # if not type(d) in [wds.DataPipeline, ChainDataset] # else 0 # for d in self.datasets[split_name] # ] # ) # else: # if hasattr(self.datasets[split_name], "__len__"): # # a single map-style dataset # num_records = len(self.datasets[split_name]) # else: # # a single wds.DataPipeline # num_records = -1 # logging.info( # "Only a single wds.DataPipeline dataset, no __len__ attribute." 
# ) # if num_records >= 0: # logging.info( # "Loaded {} records for {} split from the dataset.".format( # num_records, split_name # ) # ) # # create dataloaders # split_names = sorted(self.datasets.keys()) # datasets = [self.datasets[split] for split in split_names] # is_trains = [split in self.train_splits for split in split_names] # batch_sizes = [ # self.config.run_cfg.batch_size_train # if split == "train" # else self.config.run_cfg.batch_size_eval # for split in split_names # ] # collate_fns = [] # for dataset in datasets: # if isinstance(dataset, tuple) or isinstance(dataset, list): # collate_fns.append([getattr(d, "collater", None) for d in dataset]) # else: # collate_fns.append(getattr(dataset, "collater", None)) # dataloaders = self.create_loaders( # datasets=datasets, # num_workers=self.config.run_cfg.num_workers, # batch_sizes=batch_sizes, # is_trains=is_trains, # collate_fns=collate_fns, # ) # self._dataloaders = {k: v for k, v in zip(split_names, dataloaders)} # return self._dataloaders # @property # def cuda_enabled(self): # return self.device.type == "cuda" # @property # def max_epoch(self): # return int(self.config.run_cfg.max_epoch) # @property # def log_freq(self): # log_freq = self.config.run_cfg.get("log_freq", 50) # return int(log_freq) # @property # def init_lr(self): # return float(self.config.run_cfg.init_lr) # @property # def min_lr(self): # return float(self.config.run_cfg.min_lr) # @property # def accum_grad_iters(self): # return int(self.config.run_cfg.get("accum_grad_iters", 1)) # @property # def valid_splits(self): # valid_splits = self.config.run_cfg.get("valid_splits", []) # if len(valid_splits) == 0: # logging.info("No validation splits found.") # return valid_splits # @property # def test_splits(self): # test_splits = self.config.run_cfg.get("test_splits", []) # return test_splits # @property # def train_splits(self): # train_splits = self.config.run_cfg.get("train_splits", []) # if len(train_splits) == 0: # logging.info("Empty train splits.") # return train_splits # @property # def evaluate_only(self): # """ # Set to True to skip training. 
# """ # return self.config.run_cfg.evaluate # @property # def use_dist_eval_sampler(self): # return self.config.run_cfg.get("use_dist_eval_sampler", True) # @property # def resume_ckpt_path(self): # return self.config.run_cfg.get("resume_ckpt_path", None) # @property # def train_loader(self): # train_dataloader = self.dataloaders["train"] # return train_dataloader # def setup_output_dir(self): # lib_root = Path(registry.get_path("library_root")) # output_dir = lib_root / self.config.run_cfg.output_dir / self.job_id # result_dir = output_dir / "result" # output_dir.mkdir(parents=True, exist_ok=True) # result_dir.mkdir(parents=True, exist_ok=True) # registry.register_path("result_dir", str(result_dir)) # registry.register_path("output_dir", str(output_dir)) # self.result_dir = result_dir # self.output_dir = output_dir # def train(self): # start_time = time.time() # best_agg_metric = 0 # best_epoch = 0 # self.log_config() # # resume from checkpoint if specified # if not self.evaluate_only and self.resume_ckpt_path is not None: # self._load_checkpoint(self.resume_ckpt_path) # for cur_epoch in range(self.start_epoch, self.max_epoch): # # training phase # if not self.evaluate_only: # logging.info("Start training") # train_stats = self.train_epoch(cur_epoch) # self.log_stats(split_name="train", stats=train_stats) # # evaluation phase # if len(self.valid_splits) > 0: # for split_name in self.valid_splits: # logging.info("Evaluating on {}.".format(split_name)) # val_log = self.eval_epoch( # split_name=split_name, cur_epoch=cur_epoch # ) # if val_log is not None: # if is_main_process(): # assert ( # "agg_metrics" in val_log # ), "No agg_metrics found in validation log." # agg_metrics = val_log["agg_metrics"] # if agg_metrics > best_agg_metric and split_name == "val": # best_epoch, best_agg_metric = cur_epoch, agg_metrics # self._save_checkpoint(cur_epoch, is_best=True) # val_log.update({"best_epoch": best_epoch}) # self.log_stats(val_log, split_name) # else: # # if no validation split is provided, we just save the checkpoint at the end of each epoch. # if not self.evaluate_only: # self._save_checkpoint(cur_epoch, is_best=False) # if self.evaluate_only: # break # if self.config.run_cfg.distributed: # dist.barrier() # # testing phase # test_epoch = "best" if len(self.valid_splits) > 0 else cur_epoch # self.evaluate(cur_epoch=test_epoch, skip_reload=self.evaluate_only) # total_time = time.time() - start_time # total_time_str = str(datetime.timedelta(seconds=int(total_time))) # logging.info("Training time {}".format(total_time_str)) # def evaluate(self, cur_epoch="best", skip_reload=False): # test_logs = dict() # if len(self.test_splits) > 0: # for split_name in self.test_splits: # test_logs[split_name] = self.eval_epoch( # split_name=split_name, cur_epoch=cur_epoch, skip_reload=skip_reload # ) # return test_logs # def train_epoch(self, epoch): # # train # self.model.train() # return self.task.train_epoch( # epoch=epoch, # model=self.model, # data_loader=self.train_loader, # optimizer=self.optimizer, # scaler=self.scaler, # lr_scheduler=self.lr_scheduler, # cuda_enabled=self.cuda_enabled, # log_freq=self.log_freq, # accum_grad_iters=self.accum_grad_iters, # ) # @torch.no_grad() # def eval_epoch(self, split_name, cur_epoch, skip_reload=False): # """ # Evaluate the model on a given split. # Args: # split_name (str): name of the split to evaluate on. # cur_epoch (int): current epoch. # skip_reload_best (bool): whether to skip reloading the best checkpoint. 
# During training, we will reload the best checkpoint for validation. # During testing, we will use provided weights and skip reloading the best checkpoint . # """ # self.model.eval() # data_loader = self.dataloaders.get(split_name, None) # assert data_loader, "data_loader for split {} is None.".format(split_name) # # TODO In validation, you need to compute loss as well as metrics # # TODO consider moving to model.before_evaluation() # model = self.unwrap_dist_model(self.model) # if not skip_reload and cur_epoch == "best": # model = self._reload_best_model(model) # model.eval() # self.task.before_evaluation( # model=model, # dataset=self.datasets[split_name], # ) # results = self.task.evaluation(model, data_loader) # if results is not None: # return self.task.after_evaluation( # val_result=results, # split_name=split_name, # epoch=cur_epoch, # ) # @torch.no_grad() # def eval_epoch_new(self, split_name, cur_epoch): # """ # Evaluate the model on a given split. # Args: # split_name (str): name of the split to evaluate on. # cur_epoch (int): current epoch. # skip_reload_best (bool): whether to skip reloading the best checkpoint. # During training, we will reload the best checkpoint for validation. # During testing, we will use provided weights and skip reloading the best checkpoint . # """ # data_loader = self.dataloaders.get(split_name, None) # assert data_loader, "data_loader for split {} is None.".format(split_name) # # TODO In validation, you need to compute loss as well as metrics # # TODO consider moving to model.before_evaluation() # model = self.unwrap_dist_model(self.model) # # if not skip_reload and cur_epoch == "best": # # model = self._reload_best_model(model) # model.eval() # self.task.before_evaluation( # model=model, # dataset=self.datasets[split_name], # ) # results = self.task.evaluation(model, data_loader) # return results # # if results is not None: # # return self.task.after_evaluation( # # val_result=results, # # split_name=split_name, # # epoch=cur_epoch, # # ) # def unwrap_dist_model(self, model): # if self.use_distributed: # return model.module # else: # return model # def create_loaders( # self, # datasets, # num_workers, # batch_sizes, # is_trains, # collate_fns, # dataset_ratios=None, # ): # """ # Create dataloaders for training and validation. # """ # def _create_loader(dataset, num_workers, bsz, is_train, collate_fn): # # create a single dataloader for each split # if isinstance(dataset, ChainDataset) or isinstance( # dataset, wds.DataPipeline # ): # # wds.WebdDataset instance are chained together # # webdataset.DataPipeline has its own sampler and collate_fn # loader = iter( # DataLoader( # dataset, # batch_size=bsz, # num_workers=num_workers, # pin_memory=True, # ) # ) # else: # # map-style dataset are concatenated together # # setup distributed sampler # if self.use_distributed: # sampler = DistributedSampler( # dataset, # shuffle=is_train, # num_replicas=get_world_size(), # rank=get_rank(), # ) # if not self.use_dist_eval_sampler: # # e.g. 
retrieval evaluation # sampler = sampler if is_train else None # else: # sampler = None # loader = DataLoader( # dataset, # batch_size=bsz, # num_workers=num_workers, # pin_memory=True, # sampler=sampler, # shuffle=sampler is None and is_train, # collate_fn=collate_fn, # drop_last=True if is_train else False, # ) # loader = PrefetchLoader(loader) # if is_train: # loader = IterLoader(loader, use_distributed=self.use_distributed) # return loader # loaders = [] # for dataset, bsz, is_train, collate_fn in zip( # datasets, batch_sizes, is_trains, collate_fns # ): # if isinstance(dataset, list) or isinstance(dataset, tuple): # if hasattr(dataset[0], 'sample_ratio') and dataset_ratios is None: # dataset_ratios = [d.sample_ratio for d in dataset] # loader = MultiIterLoader( # loaders=[ # _create_loader(d, num_workers, bsz, is_train, collate_fn[i]) # for i, d in enumerate(dataset) # ], # ratios=dataset_ratios, # ) # else: # loader = _create_loader(dataset, num_workers, bsz, is_train, collate_fn) # loaders.append(loader) # return loaders # @main_process # def _save_checkpoint(self, cur_epoch, is_best=False): # """ # Save the checkpoint at the current epoch. # """ # model_no_ddp = self.unwrap_dist_model(self.model) # param_grad_dic = { # k: v.requires_grad for (k, v) in model_no_ddp.named_parameters() # } # state_dict = model_no_ddp.state_dict() # for k in list(state_dict.keys()): # if k in param_grad_dic.keys() and not param_grad_dic[k]: # # delete parameters that do not require gradient # del state_dict[k] # save_obj = { # "model": state_dict, # "optimizer": self.optimizer.state_dict(), # "config": self.config.to_dict(), # "scaler": self.scaler.state_dict() if self.scaler else None, # "epoch": cur_epoch, # } # save_to = os.path.join( # self.output_dir, # "checkpoint_{}.pth".format("best" if is_best else cur_epoch), # ) # logging.info("Saving checkpoint at epoch {} to {}.".format(cur_epoch, save_to)) # torch.save(save_obj, save_to) # def _reload_best_model(self, model): # """ # Load the best checkpoint for evaluation. # """ # checkpoint_path = os.path.join(self.output_dir, "checkpoint_best.pth") # logging.info("Loading checkpoint from {}.".format(checkpoint_path)) # checkpoint = torch.load(checkpoint_path, map_location="cpu") # try: # model.load_state_dict(checkpoint["model"]) # except RuntimeError as e: # logging.warning( # """ # Key mismatch when loading checkpoint. This is expected if only part of the model is saved. # Trying to load the model with strict=False. # """ # ) # model.load_state_dict(checkpoint["model"], strict=False) # return model # def _load_checkpoint(self, url_or_filename): # """ # Resume from a checkpoint. 
# """ # if is_url(url_or_filename): # cached_file = download_cached_file( # url_or_filename, check_hash=False, progress=True # ) # checkpoint = torch.load(cached_file, map_location=self.device) # elif os.path.isfile(url_or_filename): # checkpoint = torch.load(url_or_filename, map_location=self.device) # else: # raise RuntimeError("checkpoint url or path is invalid") # state_dict = checkpoint["model"] # self.unwrap_dist_model(self.model).load_state_dict(state_dict,strict=False) # self.optimizer.load_state_dict(checkpoint["optimizer"]) # if self.scaler and "scaler" in checkpoint: # self.scaler.load_state_dict(checkpoint["scaler"]) # self.start_epoch = checkpoint["epoch"] + 1 # logging.info("Resume checkpoint from {}".format(url_or_filename)) # @main_process # def log_stats(self, stats, split_name): # if isinstance(stats, dict): # log_stats = {**{f"{split_name}_{k}": v for k, v in stats.items()}} # with open(os.path.join(self.output_dir, "log.txt"), "a") as f: # f.write(json.dumps(log_stats) + "\n") # elif isinstance(stats, list): # pass # @main_process # def log_config(self): # with open(os.path.join(self.output_dir, "log.txt"), "a") as f: # f.write(json.dumps(self.config.to_dict(), indent=4) + "\n") @registry.register_runner("rec_runner_base")
class RecRunnerBase(RunnerBase):
13
2023-10-29 12:47:25+00:00
16k
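The `_save_checkpoint` / `_load_checkpoint` pair in the runner record above persists model, optimizer, scaler, and epoch state in a single `torch.save` object and resumes from `checkpoint["epoch"] + 1`. A minimal sketch of that round trip, restricted to the local-file branch and using a placeholder model, optimizer, and path, could be:

# Sketch of the checkpoint save/resume round trip used by _save_checkpoint /
# _load_checkpoint above, local-file branch only. The model, optimizer, path,
# and epoch number are placeholders.
import torch
import torch.nn as nn

model = nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)

save_obj = {
    "model": model.state_dict(),
    "optimizer": optimizer.state_dict(),
    "epoch": 3,
}
torch.save(save_obj, "checkpoint_3.pth")

checkpoint = torch.load("checkpoint_3.pth", map_location="cpu")
model.load_state_dict(checkpoint["model"], strict=False)
optimizer.load_state_dict(checkpoint["optimizer"])
start_epoch = checkpoint["epoch"] + 1  # resume on the epoch after the saved one
print(f"Resuming at epoch {start_epoch}")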
tobagin/whakarere
whakarere/windows/whakarere.py
[ { "identifier": "ConfigManager", "path": "whakarere/managers/config.py", "snippet": "class ConfigManager:\n def __init__(self, window):\n self.window = window\n self.config = {}\n self.config_file_path = os.path.expanduser(\"~/.config/whakarere/config.json\")\n atexit.register(self.save_config)\n\n def load_config(self):\n if os.path.exists(self.config_file_path):\n with open(self.config_file_path, \"r\") as f:\n self.config = json.load(f)\n\n def save_config(self):\n with open(self.config_file_path, \"w\") as f:\n json.dump(self.config, f)\n \n def set_config(self, key, value):\n self.config[key] = value\n\n def get_config(self, key):\n return self.config.get(key)" }, { "identifier": "SessionManager", "path": "whakarere/managers/session.py", "snippet": "class SessionManager:\n def __init__(self, window):\n self.window = window\n api_key = \"your_global_api_key_here\"\n self.api_url = \"http://localhost:3000\"\n self.headers = { 'x-api-key': api_key }\n self.current_session_id = None\n self.session_ids = []\n\n def add_session(self, session_id):\n if session_id not in self.session_ids:\n if self.check_session_id(session_id):\n self.session_ids.append(session_id)\n self.save_session_ids()\n else:\n self.terminate_session(session_id)\n session_id = self.add_session(self.generate_session_id())\n self.check_session_id(session_id)\n self.session_ids.append(session_id)\n self.save_session_ids()\n\n def remove_session(self, session_id):\n if session_id in self.session_ids:\n self.session_ids.remove(session_id)\n self.save_session_ids()\n if not self.check_session_status(session_id):\n self.terminate_session(session_id)\n\n def get_session_ids_size(self):\n return len(self.session_ids)\n\n def generate_session_id(self):\n return str(uuid.uuid4())\n\n def get_session(self, session_id):\n return self.session_ids.get(session_id)\n \n def set_current_session(self, session_id):\n self.current_session_id = session_id\n \n def get_current_session(self):\n return self.current_session_id\n\n def clear_current_session(self):\n self.current_session_id = None\n\n def get_session_ids(self):\n return self.session_ids\n \n def load_sessions(self):\n self.session_ids = self.window.config_manager.get_config(\"session_ids\")\n if self.session_ids is None:\n self.session_ids = []\n \n def save_session_ids(self):\n self.window.config_manager.set_config(\"session_ids\", self.session_ids)\n self.window.config_manager.save_config()\n \n def get_current_session_user_id(self):\n return self.window.whatsapp_manager.get_user_id(self.current_session_id)\n \n def check_session_status(self, session_id):\n url = self.api_url + f'/session/status/{session_id}'\n result = requests.get(url, headers=self.headers).json()[\"success\"]\n\n if(self.window.is_debug()):\n print(\"check_session_status: \" + str(result))\n \n return result \n\n def check_session_id(self, session_id):\n url = self.api_url + f'/session/start/{session_id}'\n result = requests.get(url, headers=self.headers).json()[\"success\"]\n\n if(self.window.is_debug()):\n print(\"check_session_id: \" + str(result))\n \n return result \n\n def terminate_session(self, session_id):\n url = self.api_url + f'/session/terminate/{session_id}'\n result = requests.get(url, headers=self.headers).json()[\"success\"]\n\n if(self.window.is_debug()):\n print(\"terminate_session: \" + str(result))\n \n return result \n \n def terminate_inactive_sessions(self):\n url = self.api_url + f'/session/terminateInactive'\n result = requests.get(url, headers=self.headers).json()[\"success\"]\n\n 
if(self.window.is_debug()):\n print(\"terminate_inactive_sessions: \" + str(result))\n \n return result \n\n def terminate_all_sessions(self, test=False):\n url = self.api_url + f'/session/terminateAll'\n result = requests.get(url, headers=self.headers).json()[\"success\"]\n\n if(self.window.is_debug()):\n print(\"terminate_inactive_sessions: \" + str(result))\n \n return result " }, { "identifier": "WhatsAppSessionManager", "path": "whakarere/managers/whatsapp.py", "snippet": "class WhatsAppSessionManager:\n def __init__(self, window):\n self.window = window\n api_key = \"your_global_api_key_here\"\n self.api_url = \"http://localhost:3000\"\n self.headers = { 'x-api-key': api_key }\n self.whatsapp_messenger_pages = {}\n self.chats = {} # Changed to a dictionary to map session IDs to chats\n self.chats_avatar = {} # Presumably for future functionality\n self.databases = {} # Changed to a dictionary to map session IDs to databases\n self.chat_messages = {} # Presumably for future functionality\n self.number = 0\n\n def load_or_create_databases(self):\n db_directory = os.path.expanduser(\"~/.config/whakarere/dbs\")\n\n # Ensure the database directory exists\n if not os.path.exists(db_directory):\n os.makedirs(db_directory)\n\n for session_id in self.window.session_manager.session_ids:\n db_file = f\"{session_id}.db\"\n db_path = os.path.join(db_directory, db_file)\n\n # Connect to the SQLite database\n conn = sqlite3.connect(db_path)\n cursor = conn.cursor()\n\n # Store the connection in the dictionary\n self.databases[session_id] = conn\n\n # Close the cursor\n cursor.close()\n\n def initialize(self):\n sessions_thread = threading.Thread(target=self.initialize_sessions)\n sessions_thread.start()\n\n def initialize_sessions(self):\n for session in self.window.session_manager.session_ids:\n if self.window.session_manager.check_session_status(session):\n result = self.get_chats(session) # Fixed assignment\n self.chats[session] = result # Store chats indexed by session ID\n for chat in result:\n chat_id = chat[\"id\"][\"_serialized\"]\n if chat[\"isGroup\"]:\n print(chat_id)\n try:\n self.chat_messages[chat_id] = self.chat_fetch_messages(chat_id, session)\n except: \n trimmed_chat_id = chat_id[-15:]\n print(trimmed_chat_id)\n self.chats[trimmed_chat_id] = self.chat_fetch_messages(trimmed_chat_id, session)\n else:\n self.chat_messages[chat_id] = self.chat_fetch_messages(chat_id, session)\n self.chats_avatar[chat_id] = self.get_user_profile_picture(chat_id, session)\n self.window.whatsapp_manager.add_whatsapp_messenger_page(session)\n\n def initialize_session_by_id(self, session_id):\n if self.window.session_manager.check_session_status(session_id):\n result = self.get_chats(session_id) # Fixed assignment\n self.chats[session_id] = result # Store chats indexed by session ID\n for chat in result:\n chat_id = chat[\"id\"][\"_serialized\"]\n if chat[\"isGroup\"]:\n print(chat_id)\n try:\n self.chat_messages[chat_id] = self.chat_fetch_messages(chat_id, session_id)\n except: \n trimmed_chat_id = chat_id[-15:]\n print(trimmed_chat_id)\n self.chats[trimmed_chat_id] = self.chat_fetch_messages(trimmed_chat_id, session_id)\n else:\n self.chat_messages[chat_id] = self.chat_fetch_messages(chat_id, session_id)\n self.chats_avatar[chat_id] = self.get_user_profile_picture(chat_id, session_id)\n if session_id not in self.whatsapp_sessions_pages:\n self.whatsapp_sessions_pages[session_id] = WhatsappMessengerPage(self, session_id)\n\n def navigate_to_whatsapp_messenger_page(self, session_id):\n # make it so it 
checks for for already open session on whatsapp_sessions_pages\n # if it has one and if doesn´t it creates a new one and pushes into the whatsapp_sessions_pages\n if session_id in self.whatsapp_sessions_pages:\n self.main_window.navigation_view.push(self.whatsapp_sessions_pages[session_id])\n else:\n self.add_whatsapp_messenger_page(session_id)\n self.main_window.navigation_view.push(self.whatsapp_sessions_pages[session_id])\n\n ############################\n # Chat methods\n ############################\n\n def get_chats(self, session_id):\n url = self.api_url + f'/client/getChats/{session_id}'\n result = requests.get(url, headers=self.headers).json()[\"chats\"]\n\n if(self.window.is_debug()):\n print(\"get_chats: \" + str(result))\n \n return result \n \n def chat_fetch_messages(self, chat_id, session_id):\n url = self.api_url + f'/chat/fetchMessages/{session_id}'\n result = requests.post(url, headers=self.headers, json={'chatId': chat_id})\n if(self.number == 3):\n print(result)\n\n json = result.json()\n\n if(self.window.is_debug()):\n print(\"get_chat_messages: \" + str(result))\n\n if(self.number == 3):\n print(json) \n self.number += 1\n \n return result \n\n def get_chats_by_id(self, session_id):\n return self.chats.get(session_id, [])\n\n def get_chat_avatar(self, chat_id):\n url = self.chats_avatar.get(chat_id, None)\n if url is not None:\n response = requests.get(url)\n loader = GdkPixbuf.PixbufLoader()\n loader.write(response.content)\n loader.close()\n return Gdk.Texture.new_for_pixbuf(loader.get_pixbuf())\n else:\n binary_data = base64.b64decode(UnknownContact.base64image)\n gbytes = GLib.Bytes.new(binary_data)\n input_stream = Gio.MemoryInputStream.new_from_bytes(gbytes)\n pixbuf = GdkPixbuf.Pixbuf.new_from_stream(input_stream, None)\n return Gdk.Texture.new_for_pixbuf(pixbuf)\n \n def get_user_profile_picture(self, userid, session_id):\n url = self.api_url + f'/client/getProfilePicUrl/{session_id}'\n try:\n result = requests.post(url, headers=self.headers, json={'contactId': userid}).json()[\"result\"]\n except:\n result = None\n\n if(self.window.is_debug()):\n print(\"get_user_profile_picture: \" + str(result))\n \n return result \n\n def get_user_id(self, session_id):\n url = self.api_url + f'/client/getClassInfo/{session_id}'\n result = requests.get(url, headers=self.headers).json()[\"sessionInfo\"][\"wid\"][\"_serialized\"] # Extract userid\n\n if(self.window.is_debug()):\n print(\"get_user_id: \" + str(result))\n \n return result \n\n def get_user_name(self, session_id):\n url = self.api_url + f'/client/getClassInfo/{session_id}'\n result = requests.get(url, headers=self.headers).json()[\"sessionInfo\"][\"pushname\"] # Return pushname\n\n if(self.window.is_debug()):\n print(\"get_user_name: \" + str(result))\n \n return result \n\n ############################\n # Contact methods\n ############################\n\n def get_contact_info(self, contact_id, session_id):\n url = self.api_url + f'/contact/getClassInfo/{session_id}'\n result = requests.post(url, headers=self.headers, json={'contactId': contact_id}).json()\n print(result)\n if(self.window.is_debug()):\n print(\"get_contact_info: \" + str(result))\n \n return result" }, { "identifier": "WindowTitlebarWidget", "path": "whakarere/widgets/titlebar.py", "snippet": "class WindowTitlebarWidget(Gtk.Box):\n def __init__(self):\n super().__init__(orientation=Gtk.Orientation.VERTICAL, spacing=2)\n self.label_title = Gtk.Label(label=\"Whakarere\")\n self.label_title.add_css_class(\"title\")\n self.label_subtitle = 
Gtk.Label(label=\"Available Sessions\")\n self.label_subtitle.add_css_class(\"subtitle\")\n self.append(self.label_title)\n self.append(self.label_subtitle)\n\n def set_title(self, title):\n self.label_title.set_label(title)\n\n def set_subtitle(self, subtitle):\n self.label_subtitle.set_label(subtitle)" }, { "identifier": "MainMenuButtonWidget", "path": "whakarere/widgets/main_menu.py", "snippet": "class MainMenuButtonWidget(Gtk.MenuButton):\n def __init__(self):\n super().__init__()\n # Create MainMenu Button Widget\n self.set_icon_name(\"open-menu-symbolic\")\n self.set_tooltip_text(\"Main Menu\")\n self.set_has_frame(False)\n self.set_direction(Gtk.ArrowType.DOWN)\n self.set_popover(Gtk.Popover())\n self.get_popover().set_position(Gtk.PositionType.BOTTOM)\n self.get_popover().set_has_arrow(True)\n self.get_popover().set_size_request(200, 200)\n self.get_popover().set_child(Gtk.Label(label=\"Main Menu\"))\n \n # About Button\n about_button = Gtk.Button()\n about_button.set_label(\"About Whakarere\")\n about_button.set_has_frame(False)\n about_button.connect(\"clicked\", self.on_about_clicked)\n \n # Keyboard Shortcuts Button\n shortcut_button = Gtk.Button()\n shortcut_button.set_label(\"Keyboard Shortcuts\")\n shortcut_button.set_has_frame(False)\n shortcut_button.connect(\"clicked\", self.on_shortcuts_clicked)\n \n # Preferences Button\n preferences_button = Gtk.Button()\n preferences_button.set_label(\"Preferences\")\n preferences_button.set_has_frame(False)\n preferences_button.connect(\"clicked\", self.on_preferences_clicked)\n\n settings_menu = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n separetor = Gtk.Separator(orientation=Gtk.Orientation.HORIZONTAL)\n settings_menu.append(separetor)\n settings_menu.append(preferences_button)\n settings_menu.append(shortcut_button)\n settings_menu.append(about_button)\n\n self.get_popover().set_child(settings_menu)\n\n def on_about_clicked(self, button):\n about_window = Adw.AboutWindow(modal=True, transient_for=self)\n about_window.set_application_icon(\"com.mudeprolinux.whakarere\")\n about_window.set_application_name(\"Whakarere\")\n about_window.set_version(\"0.1.0\")\n #about_window.set_comments(\"A Gtk4 Whatsapp Client.\")\n about_window.set_website(\"https://mudeprolinux.com\")\n about_window.set_developer_name(\"Mude Pro Linux\")\n about_window.set_developers([\"Thiago Fernandes <[email protected]>\"])\n about_window.set_designers([\"Thiago Fernandes <[email protected]>\"])\n about_window.set_license_type(Gtk.License.MIT_X11)\n about_window.set_copyright(\"2023 © Mude Pro Linux\")\n about_window.set_issue_url(\"https://github.com/tobagin/whakarere/issues\")\n\n # Show the About window\n about_window.present()\n \n def on_shortcuts_clicked(self, button):\n shortcuts_window = Gtk.ShortcutsWindow(modal=True, transient_for=self)\n shortcuts_section = Gtk.ShortcutsSection()\n shortcuts_group = Gtk.ShortcutsGroup()\n shortcuts_section.add_group(shortcuts_group)\n shortcuts_window.add_session(shortcuts_section)\n copy_shortcut = Gtk.Shortcut.new_from_string(\"<Ctrl>C\", Gtk.Label.new(\"Copy Selected Text\"))\n shortcuts_group.add(copy_shortcut)\n shortcuts_window.show()\n\n def on_preferences_clicked(self, button):\n pass" }, { "identifier": "SessionManagerPage", "path": "whakarere/pages/session.py", "snippet": "class SessionManagerPage(Adw.NavigationPage):\n def __init__(self, app_manager):\n super().__init__()\n self.set_title(\"Whakarere\")\n self.app_manager = app_manager\n self.set_can_pop(True)\n\n # Create TitleBar Widget\n 
self.window_titlebar_widget = WindowTitlebarWidget()\n\n # Create MainMenu Button Widget\n self.button_settings_menu = MainMenuButtonWidget()\n\n # Create HeaderBar\n self.page_headerbar = Adw.HeaderBar()\n self.page_headerbar.set_title_widget(self.window_titlebar_widget)\n self.page_headerbar.pack_end(self.button_settings_menu)\n\n if self.app_manager.is_dev():\n self.terminate_all_sessions = Gtk.Button()\n self.terminate_all_sessions.set_label(\"T.A.S.\") # Terminate All Sessions\n self.terminate_all_sessions.set_tooltip_text(\"Terminate All Sessions\")\n self.terminate_all_sessions.connect(\"clicked\", self.app_manager.whatsapp_manager.terminate_all_sessions)\n self.page_headerbar.pack_start(self.terminate_all_sessions)\n\n # Create Account List\n self.account_list = Gio.ListStore(item_type=AccountItem)\n for session_id in self.app_manager.session_manager.get_session_ids():\n account = AccountItem(session_id)\n self.account_list.append(account)\n\n # Factory function for creating list items\n factory = Gtk.SignalListItemFactory.new()\n factory.connect('bind', self.bind_function)\n\n # Create SingleSelection\n self.selected_item = None\n self.selected_item_position = None\n self.selection_model = Gtk.SingleSelection.new(self.account_list)\n self.selection_model.connect(\"selection-changed\", self.on_selection_changed)\n\n self.account_list.connect(\"items-changed\", self.on_items_changed)\n\n # Create ListView\n self.list_view = Gtk.ListView.new(self.selection_model, factory)\n\n # Create ScrolledWindow\n scrolled_window = Gtk.ScrolledWindow()\n scrolled_window.set_min_content_width(300)\n scrolled_window.set_min_content_height(300)\n scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)\n scrolled_window.set_child(self.list_view) # Set ListView as child of ScrolledWindow\n\n # Add session button\n self.button_add_session = Gtk.Button()\n self.button_add_session.icon_name = Gio.ThemedIcon(name=\"com.mudeprolinux.whakarere-add-session-symbolic.svg\")\n button_add_session_content = Adw.ButtonContent()\n button_add_session_content.set_icon_name(\"com.mudeprolinux.whakarere-add-session-symbolic\")\n button_add_session_content.add_css_class(\"svg-icon\")\n button_add_session_content.set_label(\"Add Session\")\n self.button_add_session.set_child(button_add_session_content)\n self.button_add_session.connect(\"clicked\", self.add_new_session)\n\n # Remove session button\n self.button_remove_session = Gtk.Button()\n button_remove_session_content = Adw.ButtonContent()\n button_remove_session_content.set_icon_name(\"com.mudeprolinux.whakarere-remove-session-symbolic\")\n button_remove_session_content.add_css_class(\"svg-icon\")\n button_remove_session_content.set_label(\"Remove Session\")\n self.button_remove_session.set_child(button_remove_session_content)\n self.button_remove_session.connect(\"clicked\", self.remove_selected_session)\n\n # Launch session button\n self.button_launch_session = Gtk.Button()\n self.button_launch_session.set_hexpand(True)\n self.button_launch_session.set_halign(Gtk.Align.CENTER)\n button_launch_session_content = Adw.ButtonContent()\n button_launch_session_content.set_icon_name(\"com.mudeprolinux.whakarere-launch-session-symbolic\")\n button_launch_session_content.add_css_class(\"svg-icon\")\n button_launch_session_content.set_label(\"Launch Session\")\n self.button_launch_session.set_child(button_launch_session_content)\n self.button_launch_session.connect(\"clicked\", self.launch_selected_session)\n\n # Activate session button\n 
self.button_activate_session = Gtk.Button()\n self.button_activate_session.set_hexpand(True)\n self.button_activate_session.set_halign(Gtk.Align.CENTER)\n button_activate_session_content = Adw.ButtonContent()\n button_activate_session_content.set_icon_name(\"com.mudeprolinux.whakarere-qr-code-symbolic\")\n button_activate_session_content.add_css_class(\"svg-icon\")\n button_activate_session_content.set_label(\"Scan QR\")\n self.button_activate_session.set_child(button_activate_session_content)\n self.button_activate_session.connect(\"clicked\", self.activate_selected_session)\n\n page_label = Gtk.Label(label=\"<b>Create a New Session.</b>\")\n page_label.set_use_markup(True)\n page_label.set_halign(Gtk.Align.CENTER)\n page_label.set_valign(Gtk.Align.CENTER)\n\n # Create content box for list view\n content_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n content_box.set_valign(Gtk.Align.CENTER) # Vertical alignment to center\n content_box.set_halign(Gtk.Align.CENTER) # Horizontal alignment to center\n content_box.set_margin_top(10)\n content_box.set_margin_bottom(10)\n content_box.set_margin_start(10)\n content_box.set_margin_end(10)\n content_box.set_hexpand(True)\n content_box.set_vexpand(True)\n content_box.append(scrolled_window)\n #content_box.append(self.action_bar)\n\n # a button bar for the bottom of the page\n button_bar = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)\n button_bar.set_halign(Gtk.Align.CENTER)\n button_bar.set_hexpand(True)\n button_bar.append(self.button_add_session)\n button_bar.append(self.button_launch_session)\n button_bar.append(self.button_activate_session)\n button_bar.append(self.button_remove_session)\n if self.app_manager.session_manager.get_session_ids_size() > 0:\n self.on_selection_changed(self.selection_model, None, None)\n if self.app_manager.whatsapp_manager.check_session_status(self.selected_item.session_id):\n self.button_launch_session.set_visible(True)\n self.button_activate_session.set_visible(False)\n else:\n self.button_launch_session.set_visible(False)\n self.button_activate_session.set_visible(True)\n else:\n self.button_launch_session.set_visible(False)\n self.button_activate_session.set_visible(False)\n\n bottom_bar = Adw.HeaderBar()\n bottom_bar.set_title_widget(button_bar)\n bottom_bar.set_show_back_button(False)\n bottom_bar.set_show_end_title_buttons(False)\n bottom_bar.set_show_start_title_buttons(False)\n\n # Create page content\n self.page_content = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n self.page_content.append(self.page_headerbar)\n self.page_content.append(content_box)\n self.page_content.append(bottom_bar)\n\n # Set page content\n self.set_child(self.page_content)\n \n def refresh_listview(self):\n # Update or refresh the data in the list store (modify as needed)\n self.account_list.remove_all()\n for session_id in self.app_manager.session_manager.get_session_ids():\n account = AccountItem(session_id)\n self.account_list.append(account)\n\n self.selection_model = Gtk.SingleSelection.new(self.account_list)\n # Notify the list view to refresh\n self.list_view.set_model(self.selection_model)\n\n def on_items_changed(self, list_store, position, removed, added):\n if not removed and self.app_manager.session_manager.get_session_ids_size() > 0:\n self.on_selection_changed(self.selection_model, None, None)\n if self.app_manager.whatsapp_manager.check_session_status(self.selected_item.session_id):\n self.button_launch_session.set_visible(True)\n self.button_activate_session.set_visible(False)\n else:\n 
self.button_launch_session.set_visible(False)\n self.button_activate_session.set_visible(True)\n else:\n self.button_launch_session.set_visible(False)\n self.button_activate_session.set_visible(False)\n\n def on_selection_changed(self, selection_model, positon, n_items):\n self.selected_item_position = selection_model.get_selected()\n self.selected_item = selection_model.get_selected_item()\n if self.selected_item is not None:\n if self.app_manager.whatsapp_manager.check_session_status(self.selected_item.session_id):\n self.button_launch_session.set_visible(True)\n self.button_activate_session.set_visible(False)\n else:\n self.button_launch_session.set_visible(False)\n self.button_activate_session.set_visible(True)\n\n def add_new_session(self, button):\n session_id = self.app_manager.session_manager.generate_session_id()\n self.app_manager.session_manager.add_session(session_id)\n self.account_list.append(AccountItem(session_id))\n\n def remove_selected_session(self, button):\n # Create a new message dialog\n dialog = Adw.MessageDialog(modal=True, transient_for=self.app_manager.main_window)\n dialog.set_heading(\"Delete Session\")\n dialog.set_body(\"Are you sure you want to delete the session?\")\n\n dialog.add_response(\"cancel\", \"_Cancel\")\n dialog.add_response(\"delete\", \"_Delete\")\n\n dialog.set_response_appearance(\"delete\", Adw.ResponseAppearance.DESTRUCTIVE)\n \n dialog.set_default_response(\"cancel\")\n dialog.set_close_response(\"cancel\")\n\n dialog.connect(\"response\", self.on_response)\n\n #self.add_overlay(dialog)\n dialog.set_visible(True)\n \n def on_response(self, dialog, response):\n if response == \"delete\":\n self.account_list.remove(self.selected_item_position)\n self.app_manager.session_manager.remove_session(self.selected_item.session_id)\n self.app_manager.whatsapp_manager.terminate_session(self.selected_item.session_id)\n self.on_selection_changed(self.selection_model, None, None)\n elif response == \"cancel\":\n pass\n dialog.destroy()\n\n def launch_selected_session(self, button):\n if self.selected_item is not None:\n self.app_manager.session_manager.set_current_session(self.selected_item.session_id)\n self.app_manager.navigate_to_whatsapp_messenger_page(self.selected_item.session_id)\n \n def activate_selected_session(self, button):\n if self.selected_item is not None:\n self.app_manager.session_manager.set_current_session(self.selected_item.session_id)\n self.app_manager.navigate_to_qr_manager_page(self.selected_item.session_id)\n\n def bind_function(self, factory, list_item):\n model = list_item.get_item()\n result = self.account_list.find(model)\n position = result.position\n if model is not None:\n is_session_active = self.app_manager.whatsapp_manager.check_session_status(model.session_id)\n print(is_session_active)\n if is_session_active:\n hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)\n hbox.set_halign(Gtk.Align.CENTER)\n avatar = Adw.Avatar()\n avatar.set_size(40)\n avatar.set_margin_top(5)\n avatar.set_margin_bottom(5)\n avatar.set_margin_start(5)\n avatar.set_halign(Gtk.Align.START)\n userid = self.app_manager.whatsapp_manager.get_user_id(model.session_id)\n response = requests.get(self.app_manager.whatsapp_manager.get_user_profile_picture(userid, model.session_id))\n response.raise_for_status()\n loader = GdkPixbuf.PixbufLoader()\n loader.write(response.content)\n loader.close()\n avatar_image = Gdk.Texture.new_for_pixbuf(loader.get_pixbuf())\n avatar.set_custom_image(avatar_image)\n hbox.append(avatar)\n label = 
Gtk.Label(label=f\"<b>{self.app_manager.whatsapp_manager.get_user_name(model.session_id)}</b>\")\n label.set_use_markup(True)\n hbox.append(label)\n list_item.set_child(hbox)\n else:\n hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)\n hbox.set_halign(Gtk.Align.CENTER)\n avatar = Adw.Avatar()\n avatar.set_size(40)\n avatar.set_margin_top(5)\n avatar.set_margin_bottom(5)\n avatar.set_margin_start(5)\n image_data = base64.b64decode(WhatsappLogoAlt.base64image)\n gbytes = GLib.Bytes.new_take(image_data)\n input_stream = Gio.MemoryInputStream.new_from_bytes(gbytes)\n pixbuf = GdkPixbuf.Pixbuf.new_from_stream(input_stream, None)\n texture = Gdk.Texture.new_for_pixbuf(pixbuf)\n avatar.set_custom_image(texture)\n hbox.append(avatar)\n label = Gtk.Label(label=\"<b>No account linked.</b>\")\n label.set_use_markup(True)\n hbox.append(label)\n list_item.set_child(hbox)" }, { "identifier": "SessionManagerPage2", "path": "whakarere/pages/session2.py", "snippet": "class SessionManagerPage2(Adw.NavigationPage):\n def __init__(self, window):\n super().__init__()\n self.set_title(\"Whakarere\")\n self.window = window\n self.set_can_pop(True)\n self.session_overlay = Gtk.Overlay()\n\n # Create TitleBar Widget\n self.window_titlebar_widget = WindowTitlebarWidget()\n self.window_titlebar_widget.set_title(\"Whakarere\")\n self.window_titlebar_widget.set_subtitle(\"A Gtk4 Whatsapp Client.\")\n # Create MainMenu Button Widget\n self.button_settings_menu = MainMenuButtonWidget()\n\n # Create HeaderBar\n self.page_headerbar = Adw.HeaderBar()\n self.page_headerbar.set_title_widget(self.window_titlebar_widget)\n self.page_headerbar.pack_end(self.button_settings_menu)\n self.add_session_button = Gtk.Button()\n self.add_session_button.set_icon_name(\"window-new-symbolic\")\n self.add_session_button.set_tooltip_text(\"Create a New Session\")\n self.add_session_button.connect(\"clicked\", self.add_new_session)\n self.page_headerbar.pack_end(self.add_session_button)\n\n if self.window.is_dev():\n self.terminate_all_sessions = Gtk.Button()\n self.terminate_all_sessions.set_label(\"T.A.S.\") # Terminate All Sessions\n self.terminate_all_sessions.set_tooltip_text(\"Terminate All Sessions\")\n self.terminate_all_sessions.connect(\"clicked\", self.window.whatsapp_manager.terminate_all_sessions)\n self.page_headerbar.pack_start(self.terminate_all_sessions)\n\n # Create Account List\n self.account_list = Gio.ListStore(item_type=AccountItem)\n for session_id in self.window.session_manager.get_session_ids():\n account = AccountItem(session_id)\n self.account_list.append(account)\n\n # Factory function for creating list items\n factory = Gtk.SignalListItemFactory.new()\n factory.connect('bind', self.bind_function)\n\n # Create SingleSelection\n self.selected_item = None\n self.selected_item_position = None\n self.selection_model = Gtk.SingleSelection.new(self.account_list)\n self.selection_model.connect(\"selection-changed\", self.on_selection_changed)\n\n self.account_list.connect(\"items-changed\", self.on_items_changed)\n\n # Create ListView\n self.list_view = Gtk.ListView.new(self.selection_model, factory)\n\n # Create ScrolledWindow\n scrolled_window = Gtk.ScrolledWindow()\n scrolled_window.set_min_content_width(300)\n scrolled_window.set_min_content_height(300)\n scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)\n scrolled_window.set_child(self.list_view) # Set ListView as child of ScrolledWindow\n\n # Add session button\n self.button_add_session = Gtk.Button()\n 
self.button_add_session.icon_name = Gio.ThemedIcon(name=\"com.mudeprolinux.whakarere-add-session-symbolic.svg\")\n button_add_session_content = Adw.ButtonContent()\n button_add_session_content.set_icon_name(\"com.mudeprolinux.whakarere-add-session-symbolic\")\n button_add_session_content.add_css_class(\"svg-icon\")\n button_add_session_content.set_label(\"Add Session\")\n self.button_add_session.set_child(button_add_session_content)\n self.button_add_session.connect(\"clicked\", self.add_new_session)\n\n # Remove session button\n self.button_remove_session = Gtk.Button()\n button_remove_session_content = Adw.ButtonContent()\n button_remove_session_content.set_icon_name(\"com.mudeprolinux.whakarere-remove-session-symbolic\")\n button_remove_session_content.add_css_class(\"svg-icon\")\n button_remove_session_content.set_label(\"Remove Session\")\n self.button_remove_session.set_child(button_remove_session_content)\n self.button_remove_session.connect(\"clicked\", self.remove_selected_session)\n\n # Launch session button\n self.button_launch_session = Gtk.Button()\n self.button_launch_session.set_hexpand(True)\n self.button_launch_session.set_halign(Gtk.Align.CENTER)\n button_launch_session_content = Adw.ButtonContent()\n button_launch_session_content.set_icon_name(\"com.mudeprolinux.whakarere-launch-session-symbolic\")\n button_launch_session_content.add_css_class(\"svg-icon\")\n button_launch_session_content.set_label(\"Launch Session\")\n self.button_launch_session.set_child(button_launch_session_content)\n self.button_launch_session.connect(\"clicked\", self.launch_selected_session)\n\n # Activate session button\n self.button_activate_session = Gtk.Button()\n self.button_activate_session.set_hexpand(True)\n self.button_activate_session.set_halign(Gtk.Align.CENTER)\n button_activate_session_content = Adw.ButtonContent()\n button_activate_session_content.set_icon_name(\"com.mudeprolinux.whakarere-qr-code-symbolic\")\n button_activate_session_content.add_css_class(\"svg-icon\")\n button_activate_session_content.set_label(\"Scan QR\")\n self.button_activate_session.set_child(button_activate_session_content)\n self.button_activate_session.connect(\"clicked\", self.activate_selected_session)\n\n page_label = Gtk.Label(label=\"<b>Create a New Session.</b>\")\n page_label.set_use_markup(True)\n page_label.set_halign(Gtk.Align.CENTER)\n page_label.set_valign(Gtk.Align.CENTER)\n\n # Create content box for list view\n content_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n content_box.set_valign(Gtk.Align.CENTER) # Vertical alignment to center\n content_box.set_halign(Gtk.Align.CENTER) # Horizontal alignment to center\n content_box.set_margin_top(10)\n content_box.set_margin_bottom(10)\n content_box.set_margin_start(10)\n content_box.set_margin_end(10)\n content_box.set_hexpand(True)\n content_box.set_vexpand(True)\n content_box.append(scrolled_window)\n #content_box.append(self.action_bar)\n\n # a button bar for the bottom of the page\n button_bar = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)\n button_bar.set_halign(Gtk.Align.CENTER)\n button_bar.set_hexpand(True)\n button_bar.append(self.button_add_session)\n button_bar.append(self.button_launch_session)\n button_bar.append(self.button_activate_session)\n button_bar.append(self.button_remove_session)\n if self.window.session_manager.get_session_ids_size() > 0:\n self.on_selection_changed(self.selection_model, None, None)\n if self.window.whatsapp_manager.check_session_status(self.selected_item.session_id):\n 
self.button_launch_session.set_visible(True)\n self.button_activate_session.set_visible(False)\n else:\n self.button_launch_session.set_visible(False)\n self.button_activate_session.set_visible(True)\n else:\n self.button_launch_session.set_visible(False)\n self.button_activate_session.set_visible(False)\n\n bottom_bar = Adw.HeaderBar()\n bottom_bar.set_title_widget(button_bar)\n bottom_bar.set_show_back_button(False)\n bottom_bar.set_show_end_title_buttons(False)\n bottom_bar.set_show_start_title_buttons(False)\n\n # Create page content\n self.page_content = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n self.page_content.append(self.page_headerbar)\n #self.page_content.append(content_box)\n #self.page_content.append(bottom_bar)\n self.session_overlay.set_child(self.page_content)\n # Set page content\n self.set_child(self.session_overlay)\n \n def refresh_listview(self):\n # Update or refresh the data in the list store (modify as needed)\n self.account_list.remove_all()\n for session_id in self.window.session_manager.get_session_ids():\n account = AccountItem(session_id)\n self.account_list.append(account)\n\n self.selection_model = Gtk.SingleSelection.new(self.account_list)\n # Notify the list view to refresh\n self.list_view.set_model(self.selection_model)\n\n def on_items_changed(self, list_store, position, removed, added):\n if not removed and self.window.session_manager.get_session_ids_size() > 0:\n self.on_selection_changed(self.selection_model, None, None)\n if self.window.whatsapp_manager.check_session_status(self.selected_item.session_id):\n self.button_launch_session.set_visible(True)\n self.button_activate_session.set_visible(False)\n else:\n self.button_launch_session.set_visible(False)\n self.button_activate_session.set_visible(True)\n else:\n self.button_launch_session.set_visible(False)\n self.button_activate_session.set_visible(False)\n\n def on_selection_changed(self, selection_model, positon, n_items):\n self.selected_item_position = selection_model.get_selected()\n self.selected_item = selection_model.get_selected_item()\n if self.selected_item is not None:\n if self.window.whatsapp_manager.check_session_status(self.selected_item.session_id):\n self.button_launch_session.set_visible(True)\n self.button_activate_session.set_visible(False)\n else:\n self.button_launch_session.set_visible(False)\n self.button_activate_session.set_visible(True)\n\n def add_new_session(self, button):\n #self.window.main_window.set_sensitive(False) # Disable main window \n new_account_wizard = AccountWizardWindow(self.app_manager)\n new_account_wizard.set_visible(True)\n\n def remove_selected_session(self, button):\n # Create a new message dialog\n dialog = Adw.MessageDialog(modal=True, transient_for=self.window.main_window)\n dialog.set_heading(\"Delete Session\")\n dialog.set_body(\"Are you sure you want to delete the session?\")\n\n dialog.add_response(\"cancel\", \"_Cancel\")\n dialog.add_response(\"delete\", \"_Delete\")\n\n dialog.set_response_appearance(\"delete\", Adw.ResponseAppearance.DESTRUCTIVE)\n \n dialog.set_default_response(\"cancel\")\n dialog.set_close_response(\"cancel\")\n\n dialog.connect(\"response\", self.on_response)\n\n #self.add_overlay(dialog)\n dialog.set_visible(True)\n \n def on_response(self, dialog, response):\n if response == \"delete\":\n self.account_list.remove(self.selected_item_position)\n self.window.session_manager.remove_session(self.selected_item.session_id)\n self.window.whatsapp_manager.terminate_session(self.selected_item.session_id)\n 
self.on_selection_changed(self.selection_model, None, None)\n elif response == \"cancel\":\n pass\n dialog.destroy()\n\n def launch_selected_session(self, button):\n if self.selected_item is not None:\n self.window.session_manager.set_current_session(self.selected_item.session_id)\n self.window.navigate_to_whatsapp_messenger_page(self.selected_item.session_id)\n \n def activate_selected_session(self, button):\n if self.selected_item is not None:\n self.window.session_manager.set_current_session(self.selected_item.session_id)\n self.window.navigate_to_qr_manager_page(self.selected_item.session_id)\n\n def bind_function(self, factory, list_item):\n model = list_item.get_item()\n result = self.account_list.find(model)\n position = result.position\n if model is not None:\n is_session_active = self.window.whatsapp_manager.check_session_status(model.session_id)\n print(is_session_active)\n if is_session_active:\n hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)\n hbox.set_halign(Gtk.Align.CENTER)\n avatar = Adw.Avatar()\n avatar.set_size(40)\n avatar.set_margin_top(5)\n avatar.set_margin_bottom(5)\n avatar.set_margin_start(5)\n avatar.set_halign(Gtk.Align.START)\n userid = self.window.whatsapp_manager.get_user_id(model.session_id)\n response = requests.get(self.window.whatsapp_manager.get_user_profile_picture(userid, model.session_id))\n response.raise_for_status()\n loader = GdkPixbuf.PixbufLoader()\n loader.write(response.content)\n loader.close()\n avatar_image = Gdk.Texture.new_for_pixbuf(loader.get_pixbuf())\n avatar.set_custom_image(avatar_image)\n hbox.append(avatar)\n label = Gtk.Label(label=f\"<b>{self.window.whatsapp_manager.get_user_name(model.session_id)}</b>\")\n label.set_use_markup(True)\n hbox.append(label)\n list_item.set_child(hbox)\n else:\n hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)\n hbox.set_halign(Gtk.Align.CENTER)\n avatar = Adw.Avatar()\n avatar.set_size(40)\n avatar.set_margin_top(5)\n avatar.set_margin_bottom(5)\n avatar.set_margin_start(5)\n image_data = base64.b64decode(WhatsappLogoAlt.base64image)\n gbytes = GLib.Bytes.new_take(image_data)\n input_stream = Gio.MemoryInputStream.new_from_bytes(gbytes)\n pixbuf = GdkPixbuf.Pixbuf.new_from_stream(input_stream, None)\n texture = Gdk.Texture.new_for_pixbuf(pixbuf)\n avatar.set_custom_image(texture)\n hbox.append(avatar)\n label = Gtk.Label(label=\"<b>No account linked.</b>\")\n label.set_use_markup(True)\n hbox.append(label)\n list_item.set_child(hbox)" }, { "identifier": "AccountWizardWindow", "path": "whakarere/windows/account_wizard.py", "snippet": "class AccountWizardWindow(Adw.Window):\n def __init__(self, window):\n super().__init__()\n self.window = window\n self.set_transient_for(window)\n self.set_modal(True)\n self.set_default_size(300, 300)\n self.connect(\"close-request\", self.on_modal_close_request)\n self.set_decorated(False)\n self.session_id = None\n\n api_key = \"your_global_api_key_here\"\n self.api_url = \"http://localhost:3000\"\n self.headers = { 'x-api-key': api_key }\n\n self.header_bar = Adw.HeaderBar()\n self.titlebar_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n self.title = Gtk.Label(label=\"Creating a Session\")\n self.subtitle = Gtk.Label(label=\"Please wait...\")\n self.titlebar_box.append(self.title)\n self.titlebar_box.append(self.subtitle)\n self.header_bar.set_title_widget(self.titlebar_box)\n\n self.window_content = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=20)\n self.window_content.set_size_request(200, 300)\n\n image = 
Gtk.Image.new_from_icon_name(\"com.mudeprolinux.whakarere\")\n image.set_pixel_size(120)\n label_title = Gtk.Label(label=\"Welcome to Whakarere\")\n label_title.set_halign(Gtk.Align.CENTER)\n label_message = Gtk.Label(label=\"Let me create a new session and I'll help you link it to your WhatsApp account.\")\n label_message.set_halign(Gtk.Align.CENTER)\n\n self.progress_bar_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=20)\n self.progress_bar = Gtk.ProgressBar()\n self.progress_bar.set_fraction(0.0)\n self.progress_bar.set_show_text(False)\n self.progress_bar.set_pulse_step(1)\n\n self.label_progress = Gtk.Label(label=\"Creating session...\")\n self.label_progress.set_halign(Gtk.Align.CENTER)\n self.label_progress.set_margin_top(0)\n self.progress_bar_box.append(self.progress_bar)\n self.progress_bar_box.append(self.label_progress)\n self.progress_bar_box.set_margin_top(40)\n self.progress_bar_box.set_margin_bottom(40)\n self.progress_bar_box.set_margin_start(20)\n self.progress_bar_box.set_margin_end(20)\n\n self.session_id = self.window.session_manager.generate_session_id()\n self.window.session_manager.add_session(self.session_id)\n thread = threading.Thread(target=self.update_progress_bar)\n thread.start()\n\n self.top_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=20)\n self.top_box.set_margin_top(20)\n self.top_box.set_halign(Gtk.Align.CENTER)\n self.top_box.set_valign(Gtk.Align.CENTER)\n self.top_box.append(image)\n self.top_box.append(label_title)\n self.top_box.append(label_message)\n self.top_box.set_margin_start(20)\n self.top_box.set_margin_end(20)\n \n self.window_content = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=20)\n self.window_content.set_size_request(200, 300)\n self.window_content.append(self.header_bar)\n self.window_content.append(self.top_box)\n self.window_content.append(self.progress_bar_box)\n self.set_content(self.window_content)\n self.present()\n\n def on_modal_close_request(self, widget):\n self.window.session_manager.remove_session(self.session_id)\n self.destroy()\n\n def update_progress_bar(self):\n self.label_progress.set_text(\"Creating session...\")\n for i in range(1, 11):\n self.progress_bar.set_fraction(i / 100)\n time.sleep(0.2)\n self.label_progress.set_text(\"Launching session...\")\n for i in range(11, 21):\n self.progress_bar.set_fraction(i / 100)\n time.sleep(0.2)\n self.label_progress.set_text(\"Waiting for session activation...\")\n for i in range(21, 31):\n self.progress_bar.set_fraction(i / 100)\n time.sleep(0.2)\n self.label_progress.set_text(\"Capturing QR code...\")\n for i in range(31, 41):\n self.progress_bar.set_fraction(i / 100)\n time.sleep(0.2)\n self.label_progress.set_text(\"Generating QR code...\")\n for i in range(41, 51):\n self.progress_bar.set_fraction(i / 100)\n time.sleep(0.2)\n \n self.progress_bar.pulse()\n self.label_progress.set_text(\"Please scan QR code to continue...\")\n self.progress_bar.pulse()\n self.qr_code_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=20)\n self.progress_bar.pulse()\n qr_code_data = self.get_qr_code_image(self.session_id)\n self.progress_bar.pulse()\n glib_bytes = GLib.Bytes.new(qr_code_data)\n self.progress_bar.pulse()\n input_stream = Gio.MemoryInputStream.new_from_bytes(glib_bytes)\n self.progress_bar.pulse()\n pixbuf = GdkPixbuf.Pixbuf.new_from_stream(input_stream, None)\n self.progress_bar.pulse()\n self.qr_code_image = Gtk.Image.new_from_pixbuf(pixbuf)\n self.progress_bar.pulse()\n self.qr_code_image.set_pixel_size(240)\n 
self.progress_bar.pulse()\n self.qr_code_box.append(self.qr_code_image)\n self.progress_bar.pulse()\n self.window_content.remove(self.top_box)\n self.progress_bar.pulse()\n self.window_content.insert_child_after(self.qr_code_box, self.header_bar)\n self.progress_bar.pulse()\n\n while not self.check_session_status(self.qr_code_image):\n self.progress_bar.pulse()\n time.sleep(1)\n\n self.progress_bar.set_fraction(0.50)\n\n self.label_progress.set_text(\"Syncing your chats...\")\n self.window.whatsapp_manager.initialize_session_by_id(self.session_id)\n for i in range(51, 71):\n self.progress_bar.set_fraction(i / 100)\n time.sleep(0.4)\n\n self.label_progress.set_text(\"Done!\")\n\n def generate_qr_code(self, qr_code_data):\n qr = qrcode.QRCode(version=1, error_correction=qrcode.constants.ERROR_CORRECT_L, box_size=10, border=4)\n qr.add_data(qr_code_data)\n qr.make(fit=True)\n return qr.make_image(fill_color=\"black\", back_color=\"white\")\n\n def get_qr_code_texture(self, qr_code_data):\n qr_image = self.generate_qr_code(qr_code_data)\n pixbuf = self.pil_image_to_pixbuf(qr_image)\n return Gdk.Texture.new_for_pixbuf(pixbuf)\n\n def pil_image_to_pixbuf(self, pil_image):\n \"\"\"Convert a PIL image to a GdkPixbuf.\"\"\"\n buffer = BytesIO()\n pil_image.save(buffer)\n glib_bytes = GLib.Bytes.new(buffer.getvalue())\n loader = GdkPixbuf.PixbufLoader.new_with_type(\"png\")\n loader.write_bytes(glib_bytes)\n pixbuf = loader.get_pixbuf()\n loader.close()\n return pixbuf\n\n def get_qr_code_image(self, session_id):\n url = self.api_url + f'/session/qr/{session_id}/image'\n result = requests.get(url, headers=self.headers).content\n\n if(self.window.is_debug()):\n print(\"get_qr_code_image: \" + str(result))\n \n return result\n\n def get_qr_code_data(self, session_id):\n url = self.api_url + f'/session/qr/{session_id}'\n result = ((requests.get(url, headers=self.headers)).json())[\"qr\"]\n\n if(self.window.is_debug()):\n print(\"get_qr_code_data: \" + str(result))\n \n return result\n \n def check_session_status(self, session_id):\n url = self.api_url + f'/session/status/{session_id}'\n result = requests.get(url, headers=self.headers).json()[\"success\"]\n\n if(self.window.is_debug()):\n print(\"check_session_status: \" + str(result))\n \n return result" } ]
import_statement:
import gi
from whakarere.managers.config import ConfigManager
from whakarere.managers.session import SessionManager
from whakarere.managers.whatsapp import WhatsAppSessionManager
from whakarere.widgets.titlebar import WindowTitlebarWidget
from whakarere.widgets.main_menu import MainMenuButtonWidget
from whakarere.pages.session import SessionManagerPage
from whakarere.pages.session2 import SessionManagerPage2
from whakarere.windows.account_wizard import AccountWizardWindow
from gi.repository import Adw, Gtk, Gdk

token_num: 11,746
cropped_code:
gi.require_version('Gtk', '4.0')
gi.require_version('Adw', '1')
gi.require_version("Gdk", "4.0")

class WhakarereMainWindow(Adw.ApplicationWindow):
    def __init__(self, app, debug=False, dev=False):
        super().__init__(application=app)
        self.app = app
        self.debug = debug
        self.dev = dev
        self.settings = Gtk.Settings.get_default()
        self.settings.connect("notify::gtk-theme-name", self.on_theme_changed)

        # Initial CSS application
        self.update_css_for_theme()

        # Set the window size and default close behavior
        self.set_default_size(800, 600)
        self.set_hide_on_close(True)

        # Create the config manager and load the config file
        self.config_manager = ConfigManager(self)
        self.config_manager.load_config()

        # Create the session manager and load the sessions
        self.session_manager = SessionManager(self)
        self.session_manager.load_sessions()

        # Create the whatsapp manager and initialize the active sessions
next_line: self.whatsapp_manager = WhatsAppSessionManager(self)
gold_snippet_index: 2
created_at: 2023-10-29 15:46:50+00:00
level: 16k
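The AccountWizardWindow snippet in the row above turns qrcode output into a Gdk.Texture by way of a PNG buffer and a GdkPixbuf.PixbufLoader. A minimal standalone sketch of that pipeline, assuming GTK4/PyGObject, GdkPixbuf and the qrcode package are available; qr_texture is a hypothetical helper name, not part of the repository:

import gi
gi.require_version("Gdk", "4.0")
gi.require_version("GdkPixbuf", "2.0")
from io import BytesIO

import qrcode
from gi.repository import Gdk, GdkPixbuf, GLib

def qr_texture(data: str) -> Gdk.Texture:
    # Render the QR code, mirroring AccountWizardWindow.generate_qr_code.
    qr = qrcode.QRCode(version=1, error_correction=qrcode.constants.ERROR_CORRECT_L,
                       box_size=10, border=4)
    qr.add_data(data)
    qr.make(fit=True)
    img = qr.make_image(fill_color="black", back_color="white")

    # PNG bytes -> GdkPixbuf -> Gdk.Texture, as in pil_image_to_pixbuf / get_qr_code_texture.
    buffer = BytesIO()
    img.save(buffer)  # qrcode's PIL-backed image writes PNG by default
    loader = GdkPixbuf.PixbufLoader.new_with_type("png")
    loader.write_bytes(GLib.Bytes.new(buffer.getvalue()))
    loader.close()
    return Gdk.Texture.new_for_pixbuf(loader.get_pixbuf())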
repo_name: KHU-VLL/CAST
file_path: dataset/datasets.py
context:
[ { "identifier": "TubeMaskingGenerator", "path": "util_tools/masking_generator.py", "snippet": "class TubeMaskingGenerator:\n def __init__(self, input_size, mask_ratio):\n self.frames, self.height, self.width = input_size\n self.num_patches_per_frame = self.height * self.width\n self.total_patches = self.frames * self.num_patches_per_frame \n self.num_masks_per_frame = int(mask_ratio * self.num_patches_per_frame)\n self.total_masks = self.frames * self.num_masks_per_frame\n\n def __repr__(self):\n repr_str = \"Maks: total patches {}, mask patches {}\".format(\n self.total_patches, self.total_masks\n )\n return repr_str\n\n def __call__(self):\n mask_per_frame = np.hstack([\n np.zeros(self.num_patches_per_frame - self.num_masks_per_frame),\n np.ones(self.num_masks_per_frame),\n ])\n np.random.shuffle(mask_per_frame)\n mask = np.tile(mask_per_frame, (self.frames,1)).flatten()\n return mask " }, { "identifier": "VideoClsDataset", "path": "dataset/kinetics.py", "snippet": "class VideoClsDataset(Dataset):\n \"\"\"Load your own video classification dataset.\"\"\"\n\n def __init__(self, anno_path, data_path, mode='train', clip_len=8,\n frame_sample_rate=2, crop_size=224, short_side_size=256,\n new_height=256, new_width=340, keep_aspect_ratio=True,\n num_segment=1, num_crop=1, test_num_segment=10, test_num_crop=3,args=None):\n self.anno_path = anno_path\n self.data_path = data_path\n self.mode = mode\n self.clip_len = clip_len\n self.frame_sample_rate = frame_sample_rate\n self.crop_size = crop_size\n self.short_side_size = short_side_size\n self.new_height = new_height\n self.new_width = new_width\n self.keep_aspect_ratio = keep_aspect_ratio\n self.num_segment = num_segment\n self.test_num_segment = test_num_segment\n self.num_crop = num_crop\n self.test_num_crop = test_num_crop\n self.args = args\n self.aug = False\n self.rand_erase = False\n if self.mode in ['train']:\n self.aug = True\n if self.args.reprob > 0:\n self.rand_erase = True\n if VideoReader is None:\n raise ImportError(\"Unable to import `decord` which is required to read videos.\")\n\n import pandas as pd\n cleaned = pd.read_csv(self.anno_path, header=None, delimiter=' ')\n self.dataset_samples = list(cleaned.values[:, 0])\n self.label_array = list(cleaned.values[:, 1])\n\n if (mode == 'train'):\n pass\n\n elif (mode == 'validation'):\n self.data_transform = video_transforms.Compose([\n video_transforms.Resize(self.short_side_size, interpolation='bilinear'),\n video_transforms.CenterCrop(size=(self.crop_size, self.crop_size)),\n volume_transforms.ClipToTensor(),\n video_transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n elif mode == 'test':\n self.data_resize = video_transforms.Compose([\n video_transforms.Resize(size=(short_side_size), interpolation='bilinear')\n ])\n self.data_transform = video_transforms.Compose([\n volume_transforms.ClipToTensor(),\n video_transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n self.test_seg = []\n self.test_dataset = []\n self.test_label_array = []\n for ck in range(self.test_num_segment):\n for cp in range(self.test_num_crop):\n for idx in range(len(self.label_array)):\n sample_label = self.label_array[idx]\n self.test_label_array.append(sample_label)\n self.test_dataset.append(self.dataset_samples[idx])\n self.test_seg.append((ck, cp))\n\n def __getitem__(self, index):\n if self.mode == 'train':\n args = self.args \n scale_t = 1\n\n sample = self.dataset_samples[index]\n sample = os.path.join(self.data_path,'train',sample)# 
self.data_path + '/videos_train/' + sample\n buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t) # T H W C\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\"video {} not correctly loaded during training\".format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t)\n\n if args.num_sample > 1:\n frame_list = []\n label_list = []\n index_list = []\n for _ in range(args.num_sample):\n new_frames = self._aug_frame(buffer, args)\n label = self.label_array[index]\n frame_list.append(new_frames)\n label_list.append(label)\n index_list.append(index)\n return frame_list, label_list, index_list, {}\n else:\n buffer = self._aug_frame(buffer, args)\n \n return buffer, self.label_array[index], index, {}\n\n elif self.mode == 'validation':\n sample = self.dataset_samples[index]\n sample = os.path.join(self.data_path,'val',sample)# self.data_path + '/videos_train/' + sample\n buffer = self.loadvideo_decord(sample)\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\"video {} not correctly loaded during validation\".format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample)\n buffer = self.data_transform(buffer)\n return buffer, self.label_array[index], sample.split(\"/\")[-1].split(\".\")[0]\n\n elif self.mode == 'test':\n sample = self.test_dataset[index]\n sample = os.path.join(self.data_path,'val',sample)# self.data_path + '/videos_train/' + sample\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.loadvideo_decord(sample)\n\n while len(buffer) == 0:\n warnings.warn(\"video {}, temporal {}, spatial {} not found during testing\".format(\\\n str(self.test_dataset[index]), chunk_nb, split_nb))\n index = np.random.randint(self.__len__())\n sample = self.test_dataset[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.loadvideo_decord(sample)\n\n buffer = self.data_resize(buffer)\n if isinstance(buffer, list):\n buffer = np.stack(buffer, 0)\n\n spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) \\\n / (self.test_num_crop - 1)\n temporal_step = max(1.0 * (buffer.shape[0] - self.clip_len) \\\n / (self.test_num_segment - 1), 0)\n temporal_start = int(chunk_nb * temporal_step)\n spatial_start = int(split_nb * spatial_step)\n if buffer.shape[1] >= buffer.shape[2]:\n buffer = buffer[temporal_start:temporal_start + self.clip_len, \\\n spatial_start:spatial_start + self.short_side_size, :, :]\n else:\n buffer = buffer[temporal_start:temporal_start + self.clip_len, \\\n :, spatial_start:spatial_start + self.short_side_size, :]\n\n buffer = self.data_transform(buffer)\n return buffer, self.test_label_array[index], sample.split(\"/\")[-1].split(\".\")[0], \\\n chunk_nb, split_nb\n else:\n raise NameError('mode {} unkown'.format(self.mode))\n\n def _aug_frame(\n self,\n buffer,\n args,\n ):\n\n aug_transform = video_transforms.create_random_augment(\n input_size=(self.crop_size, self.crop_size),\n auto_augment=args.aa,\n interpolation=args.train_interpolation,\n )\n\n buffer = [\n transforms.ToPILImage()(frame) for frame in buffer\n ]\n\n buffer = aug_transform(buffer)\n\n buffer = [transforms.ToTensor()(img) for img in buffer]\n buffer = torch.stack(buffer) # T C H W\n buffer = buffer.permute(0, 2, 3, 1) # T H W C \n \n # T H W C \n buffer = tensor_normalize(\n buffer, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]\n )\n # T H W C -> C 
T H W.\n buffer = buffer.permute(3, 0, 1, 2)\n # Perform data augmentation.\n scl, asp = (\n [0.08, 1.0],\n [0.75, 1.3333],\n )\n\n buffer = spatial_sampling(\n buffer,\n spatial_idx=-1,\n min_scale=256,\n max_scale=320,\n crop_size=self.crop_size,\n random_horizontal_flip=False if args.data_set == 'SSV2' else True ,\n inverse_uniform_sampling=False,\n aspect_ratio=asp,\n scale=scl,\n motion_shift=False\n )\n\n if self.rand_erase:\n erase_transform = RandomErasing(\n args.reprob,\n mode=args.remode,\n max_count=args.recount,\n num_splits=args.recount,\n device=\"cpu\",\n )\n buffer = buffer.permute(1, 0, 2, 3)\n buffer = erase_transform(buffer)\n buffer = buffer.permute(1, 0, 2, 3)\n\n return buffer\n\n\n def loadvideo_decord(self, sample, sample_rate_scale=1):\n \"\"\"Load video content using Decord\"\"\"\n fname = sample\n\n if not (os.path.exists(fname)):\n return []\n\n # avoid hanging issue\n if os.path.getsize(fname) < 1 * 1024:\n print('SKIP: ', fname, \" - \", os.path.getsize(fname))\n return []\n try:\n if self.keep_aspect_ratio:\n vr = VideoReader(fname, num_threads=1, ctx=cpu(0))\n else:\n vr = VideoReader(fname, width=self.new_width, height=self.new_height,\n num_threads=1, ctx=cpu(0))\n except:\n print(\"video cannot be loaded by decord: \", fname)\n return []\n\n if self.mode == 'test':\n all_index = [x for x in range(0, len(vr), self.frame_sample_rate)]\n while len(all_index) < self.clip_len:\n all_index.append(all_index[-1])\n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n\n # handle temporal segments\n converted_len = int(self.clip_len * self.frame_sample_rate)\n seg_len = len(vr) // self.num_segment\n\n all_index = []\n for i in range(self.num_segment):\n if seg_len <= converted_len:\n index = np.linspace(0, seg_len, num=seg_len // self.frame_sample_rate)\n index = np.concatenate((index, np.ones(self.clip_len - seg_len // self.frame_sample_rate) * seg_len))\n index = np.clip(index, 0, seg_len - 1).astype(np.int64)\n else:\n end_idx = np.random.randint(converted_len, seg_len)\n str_idx = end_idx - converted_len\n index = np.linspace(str_idx, end_idx, num=self.clip_len)\n index = np.clip(index, str_idx, end_idx - 1).astype(np.int64)\n index = index + i*seg_len\n all_index.extend(list(index))\n\n all_index = all_index[::int(sample_rate_scale)]\n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n\n def __len__(self):\n if self.mode != 'test':\n return len(self.dataset_samples)\n else:\n return len(self.test_dataset)" }, { "identifier": "VideoMAE", "path": "dataset/kinetics.py", "snippet": "class VideoMAE(torch.utils.data.Dataset):\n \"\"\"Load your own video classification dataset.\n Parameters\n ----------\n root : str, required.\n Path to the root folder storing the dataset.\n setting : str, required.\n A text file describing the dataset, each line per video sample.\n There are three items in each line: (1) video path; (2) video length and (3) video label.\n train : bool, default True.\n Whether to load the training or validation set.\n test_mode : bool, default False.\n Whether to perform evaluation on the test set.\n Usually there is three-crop or ten-crop evaluation strategy involved.\n name_pattern : str, default None.\n The naming pattern of the decoded video frames.\n For example, img_00012.jpg.\n video_ext : str, default 'mp4'.\n If video_loader is set to True, please specify the video format accordinly.\n is_color : bool, default True.\n Whether the loaded image is color or grayscale.\n modality : str, default 
'rgb'.\n Input modalities, we support only rgb video frames for now.\n Will add support for rgb difference image and optical flow image later.\n num_segments : int, default 1.\n Number of segments to evenly divide the video into clips.\n A useful technique to obtain global video-level information.\n Limin Wang, etal, Temporal Segment Networks: Towards Good Practices for Deep Action Recognition, ECCV 2016.\n num_crop : int, default 1.\n Number of crops for each image. default is 1.\n Common choices are three crops and ten crops during evaluation.\n new_length : int, default 1.\n The length of input video clip. Default is a single image, but it can be multiple video frames.\n For example, new_length=16 means we will extract a video clip of consecutive 16 frames.\n new_step : int, default 1.\n Temporal sampling rate. For example, new_step=1 means we will extract a video clip of consecutive frames.\n new_step=2 means we will extract a video clip of every other frame.\n temporal_jitter : bool, default False.\n Whether to temporally jitter if new_step > 1.\n video_loader : bool, default False.\n Whether to use video loader to load data.\n use_decord : bool, default True.\n Whether to use Decord video loader to load data. Otherwise use mmcv video loader.\n transform : function, default None.\n A function that takes data and label and transforms them.\n data_aug : str, default 'v1'.\n Different types of data augmentation auto. Supports v1, v2, v3 and v4.\n lazy_init : bool, default False.\n If set to True, build a dataset instance without loading any dataset.\n \"\"\"\n def __init__(self,\n root,\n setting,\n train=True,\n test_mode=False,\n name_pattern='img_%05d.jpg',\n video_ext='mp4',\n is_color=True,\n modality='rgb',\n num_segments=1,\n num_crop=1,\n new_length=1,\n new_step=1,\n transform=None,\n temporal_jitter=False,\n video_loader=False,\n use_decord=False,\n lazy_init=False):\n\n super(VideoMAE, self).__init__()\n self.root = root\n self.setting = setting\n self.train = train\n self.test_mode = test_mode\n self.is_color = is_color\n self.modality = modality\n self.num_segments = num_segments\n self.num_crop = num_crop\n self.new_length = new_length\n self.new_step = new_step\n self.skip_length = self.new_length * self.new_step\n self.temporal_jitter = temporal_jitter\n self.name_pattern = name_pattern\n self.video_loader = video_loader\n self.video_ext = video_ext\n self.use_decord = use_decord\n self.transform = transform\n self.lazy_init = lazy_init\n\n\n if not self.lazy_init:\n self.clips = self._make_dataset(root, setting)\n if len(self.clips) == 0:\n raise(RuntimeError(\"Found 0 video clips in subfolders of: \" + root + \"\\n\"\n \"Check your data directory (opt.data-dir).\"))\n\n def __getitem__(self, index):\n\n directory, target = self.clips[index]\n if self.video_loader:\n if '.' 
in directory.split('/')[-1]:\n # data in the \"setting\" file already have extension, e.g., demo.mp4\n video_name = directory\n else:\n # data in the \"setting\" file do not have extension, e.g., demo\n # So we need to provide extension (i.e., .mp4) to complete the file name.\n video_name = '{}.{}'.format(directory, self.video_ext)\n\n decord_vr = decord.VideoReader(video_name, num_threads=1)\n duration = len(decord_vr)\n\n segment_indices, skip_offsets = self._sample_train_indices(duration)\n\n images = self._video_TSN_decord_batch_loader(directory, decord_vr, duration, segment_indices, skip_offsets)\n\n process_data, mask = self.transform((images, None)) # T*C,H,W\n process_data = process_data.view((self.new_length, 3) + process_data.size()[-2:]).transpose(0,1) # T*C,H,W -> T,C,H,W -> C,T,H,W\n \n return (process_data, mask)\n\n def __len__(self):\n return len(self.clips)\n\n def _make_dataset(self, directory, setting):\n if not os.path.exists(setting):\n raise(RuntimeError(\"Setting file %s doesn't exist. Check opt.train-list and opt.val-list. \" % (setting)))\n clips = []\n with open(setting) as split_f:\n data = split_f.readlines()\n for line in data:\n line_info = line.split(' ')\n # line format: video_path, video_duration, video_label\n if len(line_info) < 2:\n raise(RuntimeError('Video input format is not correct, missing one or more element. %s' % line))\n clip_path = os.path.join(line_info[0])\n target = int(line_info[1])\n item = (clip_path, target)\n clips.append(item)\n return clips\n\n def _sample_train_indices(self, num_frames):\n average_duration = (num_frames - self.skip_length + 1) // self.num_segments\n if average_duration > 0:\n offsets = np.multiply(list(range(self.num_segments)),\n average_duration)\n offsets = offsets + np.random.randint(average_duration,\n size=self.num_segments)\n elif num_frames > max(self.num_segments, self.skip_length):\n offsets = np.sort(np.random.randint(\n num_frames - self.skip_length + 1,\n size=self.num_segments))\n else:\n offsets = np.zeros((self.num_segments,))\n\n if self.temporal_jitter:\n skip_offsets = np.random.randint(\n self.new_step, size=self.skip_length // self.new_step)\n else:\n skip_offsets = np.zeros(\n self.skip_length // self.new_step, dtype=int)\n return offsets + 1, skip_offsets\n\n\n def _video_TSN_decord_batch_loader(self, directory, video_reader, duration, indices, skip_offsets):\n sampled_list = []\n frame_id_list = []\n for seg_ind in indices:\n offset = int(seg_ind)\n for i, _ in enumerate(range(0, self.skip_length, self.new_step)):\n if offset + skip_offsets[i] <= duration:\n frame_id = offset + skip_offsets[i] - 1\n else:\n frame_id = offset - 1\n frame_id_list.append(frame_id)\n if offset + self.new_step < duration:\n offset += self.new_step\n try:\n video_data = video_reader.get_batch(frame_id_list).asnumpy()\n sampled_list = [Image.fromarray(video_data[vid, :, :, :]).convert('RGB') for vid, _ in enumerate(frame_id_list)]\n except:\n raise RuntimeError('Error occured in reading frames {} from video {} of duration {}.'.format(frame_id_list, directory, duration))\n return sampled_list" }, { "identifier": "SSVideoClsDataset", "path": "dataset/ssv2.py", "snippet": "class SSVideoClsDataset(Dataset):\n \"\"\"Load your own video classification dataset.\"\"\"\n\n def __init__(self, anno_path, data_path, mode='train', clip_len=8,\n crop_size=224, short_side_size=256, new_height=256,\n new_width=340, keep_aspect_ratio=True, num_segment=1,\n num_crop=1, test_num_segment=10, test_num_crop=3, args=None):\n self.anno_path 
= anno_path\n self.data_path = data_path\n self.mode = mode\n self.clip_len = clip_len\n self.crop_size = crop_size\n self.short_side_size = short_side_size\n self.new_height = new_height\n self.new_width = new_width\n self.keep_aspect_ratio = keep_aspect_ratio\n self.num_segment = num_segment\n self.test_num_segment = test_num_segment\n self.num_crop = num_crop\n self.test_num_crop = test_num_crop\n self.args = args\n self.aug = False\n self.rand_erase = False\n if self.mode in ['train']:\n self.aug = True\n if self.args.reprob > 0:\n self.rand_erase = True\n if VideoReader is None:\n raise ImportError(\"Unable to import `decord` which is required to read videos.\")\n\n import pandas as pd\n cleaned = pd.read_csv(self.anno_path, header=None, delimiter=' ')\n self.dataset_samples = list(cleaned.values[:, 0])\n self.label_array = list(cleaned.values[:, 1])\n\n if (mode == 'train'):\n pass\n\n elif (mode == 'validation'):\n self.data_transform = video_transforms.Compose([\n video_transforms.Resize(self.short_side_size, interpolation='bilinear'),\n video_transforms.CenterCrop(size=(self.crop_size, self.crop_size)),\n volume_transforms.ClipToTensor(),\n video_transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n elif mode == 'test':\n self.data_resize = video_transforms.Compose([\n video_transforms.Resize(size=(short_side_size), interpolation='bilinear')\n ])\n self.data_transform = video_transforms.Compose([\n volume_transforms.ClipToTensor(),\n video_transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n self.test_seg = []\n self.test_dataset = []\n self.test_label_array = []\n for ck in range(self.test_num_segment):\n for cp in range(self.test_num_crop):\n for idx in range(len(self.label_array)):\n sample_label = self.label_array[idx]\n self.test_label_array.append(sample_label)\n self.test_dataset.append(self.dataset_samples[idx])\n self.test_seg.append((ck, cp))\n\n def __getitem__(self, index):\n if self.mode == 'train':\n args = self.args \n scale_t = 1\n\n sample = self.dataset_samples[index]\n sample = os.path.join(self.data_path,sample)# self.data_path + '/videos_train/' + sample\n buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t) # T H W C\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\"video {} not correctly loaded during training\".format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t)\n\n if args.num_sample > 1:\n frame_list = []\n label_list = []\n index_list = []\n for _ in range(args.num_sample):\n new_frames = self._aug_frame(buffer, args)\n label = self.label_array[index]\n frame_list.append(new_frames)\n label_list.append(label)\n index_list.append(index)\n return frame_list, label_list, index_list, {}\n else:\n buffer = self._aug_frame(buffer, args)\n \n return buffer, self.label_array[index], index, {}\n \n elif self.mode == 'validation':\n sample = self.dataset_samples[index]\n sample = os.path.join(self.data_path,sample)# self.data_path + '/videos_train/' + sample\n buffer = self.loadvideo_decord(sample)\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\"video {} not correctly loaded during validation\".format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample)\n buffer = self.data_transform(buffer)\n return buffer, self.label_array[index], 
sample.split(\"/\")[-1].split(\".\")[0]\n\n elif self.mode == 'test':\n sample = self.test_dataset[index]\n sample = os.path.join(self.data_path,sample)# self.data_path + '/videos_train/' + sample\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.loadvideo_decord(sample)\n\n while len(buffer) == 0:\n warnings.warn(\"video {}, temporal {}, spatial {} not found during testing\".format(\\\n str(self.test_dataset[index]), chunk_nb, split_nb))\n index = np.random.randint(self.__len__())\n sample = self.test_dataset[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.loadvideo_decord(sample)\n\n buffer = self.data_resize(buffer)\n if isinstance(buffer, list):\n buffer = np.stack(buffer, 0)\n\n if self.test_num_crop == 1:\n spatial_step = 1.0 * (max( buffer.shape[1], buffer.shape[2]) - self.short_side_size) \\\n / (self.test_num_crop)\n else:\n spatial_step = 1.0 * (max( buffer.shape[1], buffer.shape[2]) - self.short_side_size) \\\n / (self.test_num_crop - 1)\n temporal_start = chunk_nb # 0/1\n spatial_start = int(split_nb * spatial_step)\n if buffer.shape[1] >= buffer.shape[2]:\n buffer = buffer[temporal_start::2, \\\n spatial_start:spatial_start + self.short_side_size, :, :]\n else:\n buffer = buffer[temporal_start::2, \\\n :, spatial_start:spatial_start + self.short_side_size, :]\n\n buffer = self.data_transform(buffer)\n return buffer, self.test_label_array[index], sample.split(\"/\")[-1].split(\".\")[0], \\\n chunk_nb, split_nb\n else:\n raise NameError('mode {} unkown'.format(self.mode))\n\n def _aug_frame(\n self,\n buffer,\n args,\n ):\n\n aug_transform = video_transforms.create_random_augment(\n input_size=(self.crop_size, self.crop_size),\n auto_augment=args.aa,\n interpolation=args.train_interpolation,\n )\n\n buffer = [\n transforms.ToPILImage()(frame) for frame in buffer\n ]\n\n buffer = aug_transform(buffer)\n\n buffer = [transforms.ToTensor()(img) for img in buffer]\n buffer = torch.stack(buffer) # T C H W\n buffer = buffer.permute(0, 2, 3, 1) # T H W C \n \n # T H W C \n buffer = tensor_normalize(\n buffer, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]\n )\n # T H W C -> C T H W.\n buffer = buffer.permute(3, 0, 1, 2)\n # Perform data augmentation.\n scl, asp = (\n [0.08, 1.0],\n [0.75, 1.3333],\n )\n\n buffer = spatial_sampling(\n buffer,\n spatial_idx=-1,\n min_scale=256,\n max_scale=320,\n crop_size=self.crop_size,\n random_horizontal_flip=False if args.data_set == 'SSV2' else True,\n inverse_uniform_sampling=False,\n aspect_ratio=asp,\n scale=scl,\n motion_shift=False\n )\n\n if self.rand_erase:\n erase_transform = RandomErasing(\n args.reprob,\n mode=args.remode,\n max_count=args.recount,\n num_splits=args.recount,\n device=\"cpu\",\n )\n buffer = buffer.permute(1, 0, 2, 3)\n buffer = erase_transform(buffer)\n buffer = buffer.permute(1, 0, 2, 3)\n\n return buffer\n\n\n def loadvideo_decord(self, sample, sample_rate_scale=1):\n \"\"\"Load video content using Decord\"\"\"\n fname = sample\n\n if not (os.path.exists(fname)):\n return []\n\n # avoid hanging issue\n if os.path.getsize(fname) < 1 * 1024:\n print('SKIP: ', fname, \" - \", os.path.getsize(fname))\n return []\n try:\n if self.keep_aspect_ratio:\n vr = VideoReader(fname, num_threads=1, ctx=cpu(0))\n else:\n vr = VideoReader(fname, width=self.new_width, height=self.new_height,\n num_threads=1, ctx=cpu(0))\n except:\n print(\"video cannot be loaded by decord: \", fname)\n return []\n \n if self.mode == 'test':\n all_index = []\n tick = len(vr) / float(self.num_segment)\n all_index = 
list(np.array([int(tick / 2.0 + tick * x) for x in range(self.num_segment)] +\n [int(tick * x) for x in range(self.num_segment)]))\n while len(all_index) < (self.num_segment * self.test_num_segment):\n all_index.append(all_index[-1])\n all_index = list(np.sort(np.array(all_index))) \n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n\n # handle temporal segments\n average_duration = len(vr) // self.num_segment\n all_index = []\n if average_duration > 0:\n all_index += list(np.multiply(list(range(self.num_segment)), average_duration) + np.random.randint(average_duration,\n size=self.num_segment))\n elif len(vr) > self.num_segment:\n all_index += list(np.sort(np.random.randint(len(vr), size=self.num_segment)))\n else:\n all_index += list(np.zeros((self.num_segment,)))\n all_index = list(np.array(all_index)) \n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n\n def __len__(self):\n if self.mode != 'test':\n return len(self.dataset_samples)\n else:\n return len(self.test_dataset)" }, { "identifier": "EpicVideoClsDataset", "path": "dataset/epic.py", "snippet": "class EpicVideoClsDataset(Dataset):\n \n def __init__(self, anno_path, data_path, mode='train', clip_len=8,\n crop_size=224, short_side_size=256, new_height=256,\n new_width=340, keep_aspect_ratio=True, num_segment=1,\n num_crop=1, test_num_segment=10, test_num_crop=3, args=None):\n self.anno_path = anno_path\n self.data_path = data_path\n self.mode = mode\n self.clip_len = clip_len\n self.crop_size = crop_size\n self.short_side_size = short_side_size\n self.new_height = new_height\n self.new_width = new_width\n self.keep_aspect_ratio = keep_aspect_ratio\n self.num_segment = num_segment\n self.test_num_segment = test_num_segment\n self.num_crop = num_crop\n self.test_num_crop = test_num_crop\n self.args = args\n self.aug = False\n self.rand_erase = False\n if self.mode in ['train']:\n self.aug = True\n if self.args.reprob > 0:\n self.rand_erase = True\n if VideoReader is None:\n raise ImportError(\"Unable to import `decord` which is required to read videos.\")\n \n import pandas as pd\n cleaned = pd.read_csv(self.anno_path, header=None, delimiter=',')\n self.dataset_samples = list(cleaned.values[:, 0])\n verb_label_array = list(cleaned.values[:, 1]) # verb\n noun_label_array = list(cleaned.values[:, 2]) # noun\n self.label_array = np.stack((noun_label_array, verb_label_array), axis=1) # label [noun, verb] sequence\n \n if (mode == 'train'):\n pass\n \n elif (mode == 'validation'):\n self.data_transform = video_transforms.Compose([\n video_transforms.Resize(self.short_side_size, interpolation='bilinear'),\n video_transforms.CenterCrop(size=(self.crop_size, self.crop_size)),\n volume_transforms.ClipToTensor(),\n video_transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n elif (mode == 'test'):\n self.data_resize = video_transforms.Compose([\n video_transforms.Resize(size=(short_side_size), interpolation='bilinear')\n ])\n self.data_transform = video_transforms.Compose([\n volume_transforms.ClipToTensor(),\n video_transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n self.test_seg = []\n self.test_dataset = []\n self.test_label_array = []\n for ck in range(self.test_num_segment):\n for cp in range(self.test_num_crop):\n for idx in range(len(self.label_array)):\n sample_label = self.label_array[idx]\n self.test_label_array.append(sample_label)\n self.test_dataset.append(self.dataset_samples[idx])\n self.test_seg.append((ck, cp))\n 
\n def __getitem__(self, index):\n if self.mode == 'train':\n args = self.args\n scale_t = 1\n \n sample = self.dataset_samples[index] + '.mp4'\n sample = os.path.join(self.data_path, sample)\n buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t) # T H W C\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\"video {} not correctly loaded during training\".format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t)\n \n if args.num_sample > 1:\n frame_list = []\n label_list = []\n index_list = []\n for _ in range(args.num_sample):\n new_frames = self._aug_frame(buffer, args)\n label = self.label_array[index]\n frame_list.append(new_frames)\n label_list.append(label)\n index_list.append(index)\n return frame_list, label_list, index_list, {}\n else:\n buffer = self._aug_frame(buffer, args)\n \n return buffer, self.label_array[index], index, {}\n \n elif self.mode == 'validation':\n sample = self.dataset_samples[index] + '.mp4'\n sample = os.path.join(self.data_path, sample)\n buffer = self.loadvideo_decord(sample)\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\"video {} not correctly loaded during validation\".format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample)\n buffer = self.data_transform(buffer)\n return buffer, self.label_array[index], sample.split(\"/\")[-1].split(\".\")[0]\n \n elif self.mode == 'test':\n sample = self.test_dataset[index] + '.mp4'\n sample = os.path.join(self.data_path, sample)\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.loadvideo_decord(sample)\n\n while len(buffer) == 0:\n warnings.warn(\"video {}, temporal {}, spatial {} not found during testing\".format(\\\n str(self.test_dataset[index]), chunk_nb, split_nb))\n index = np.random.randint(self.__len__())\n sample = self.test_dataset[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.loadvideo_decord(sample)\n\n buffer = self.data_resize(buffer)\n if isinstance(buffer, list):\n buffer = np.stack(buffer, 0)\n\n if self.test_num_crop == 1:\n spatial_step = 1.0 * (max( buffer.shape[1], buffer.shape[2]) - self.short_side_size) \\\n / (self.test_num_crop)\n else:\n spatial_step = 1.0 * (max( buffer.shape[1], buffer.shape[2]) - self.short_side_size) \\\n / (self.test_num_crop - 1)\n temporal_start = chunk_nb # 0/1\n spatial_start = int(split_nb * spatial_step)\n if buffer.shape[1] >= buffer.shape[2]:\n buffer = buffer[temporal_start::2, \\\n spatial_start:spatial_start + self.short_side_size, :, :]\n else:\n buffer = buffer[temporal_start::2, \\\n :, spatial_start:spatial_start + self.short_side_size, :]\n\n buffer = self.data_transform(buffer)\n return buffer, self.test_label_array[index], sample.split(\"/\")[-1].split(\".\")[0], \\\n chunk_nb, split_nb\n else:\n raise NameError('mode {} unkown'.format(self.mode))\n \n \n\n def _aug_frame(self,buffer,args):\n\n aug_transform = video_transforms.create_random_augment(\n input_size=(self.crop_size, self.crop_size),\n auto_augment=args.aa,\n interpolation=args.train_interpolation,\n )\n\n buffer = [\n transforms.ToPILImage()(frame) for frame in buffer\n ]\n\n buffer = aug_transform(buffer)\n\n buffer = [transforms.ToTensor()(img) for img in buffer]\n buffer = torch.stack(buffer) # T C H W\n buffer = buffer.permute(0, 2, 3, 1) # T H W C \n \n # T H W C \n buffer = tensor_normalize(\n buffer, [0.485, 0.456, 
0.406], [0.229, 0.224, 0.225]\n )\n # T H W C -> C T H W.\n buffer = buffer.permute(3, 0, 1, 2)\n # Perform data augmentation.\n scl, asp = (\n [0.08, 1.0],\n [0.75, 1.3333],\n )\n\n buffer = spatial_sampling(\n buffer,\n spatial_idx=-1,\n min_scale=256,\n max_scale=320,\n crop_size=self.crop_size,\n random_horizontal_flip=False if args.data_set == 'SSV2' else True,\n inverse_uniform_sampling=False,\n aspect_ratio=asp,\n scale=scl,\n motion_shift=False\n )\n\n if self.rand_erase:\n erase_transform = RandomErasing(\n args.reprob,\n mode=args.remode,\n max_count=args.recount,\n num_splits=args.recount,\n device=\"cpu\",\n )\n buffer = buffer.permute(1, 0, 2, 3)\n buffer = erase_transform(buffer)\n buffer = buffer.permute(1, 0, 2, 3)\n\n return buffer\n \n\n def loadvideo_decord(self, sample, sample_rate_scale=1):\n \"\"\"Load video content using Decord\"\"\"\n fname = sample\n\n if not (os.path.exists(fname)):\n return []\n\n # avoid hanging issue\n if os.path.getsize(fname) < 1 * 1024:\n print('SKIP: ', fname, \" - \", os.path.getsize(fname))\n return []\n try:\n if self.keep_aspect_ratio:\n vr = VideoReader(fname, num_threads=1, ctx=cpu(0))\n else:\n vr = VideoReader(fname, width=self.new_width, height=self.new_height,\n num_threads=1, ctx=cpu(0))\n except:\n print(\"video cannot be loaded by decord: \", fname)\n return []\n \n if self.mode == 'test':\n all_index = []\n tick = len(vr) / float(self.num_segment)\n all_index = list(np.array([int(tick / 2.0 + tick * x) for x in range(self.num_segment)] +\n [int(tick * x) for x in range(self.num_segment)]))\n while len(all_index) < (self.num_segment * self.test_num_segment):\n all_index.append(all_index[-1])\n all_index = list(np.sort(np.array(all_index))) \n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n\n # handle temporal segments\n average_duration = len(vr) // self.num_segment\n all_index = []\n if average_duration > 0:\n all_index += list(np.multiply(list(range(self.num_segment)), average_duration) + np.random.randint(average_duration,\n size=self.num_segment))\n elif len(vr) > self.num_segment:\n all_index += list(np.sort(np.random.randint(len(vr), size=self.num_segment)))\n else:\n all_index += list(np.zeros((self.num_segment,)))\n all_index = list(np.array(all_index)) \n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n\n def __len__(self):\n if self.mode != 'test':\n return len(self.dataset_samples)\n else:\n return len(self.test_dataset)" } ]
import_statement:
import os
from torchvision import transforms
from util_tools.transforms import *
from util_tools.masking_generator import TubeMaskingGenerator
from .kinetics import VideoClsDataset, VideoMAE
from .ssv2 import SSVideoClsDataset
from .epic import EpicVideoClsDataset

token_num: 10,955
cropped_code:
class DataAugmentationForVideoMAE(object):
    def __init__(self, args):
        self.input_mean = [0.485, 0.456, 0.406]  # IMAGENET_DEFAULT_MEAN
        self.input_std = [0.229, 0.224, 0.225]  # IMAGENET_DEFAULT_STD
        normalize = GroupNormalize(self.input_mean, self.input_std)
        self.train_augmentation = GroupMultiScaleCrop(args.input_size, [1, .875, .75, .66])
        self.transform = transforms.Compose([
            self.train_augmentation,
            Stack(roll=False),
            ToTorchFormatTensor(div=True),
            normalize,
        ])
        if args.mask_type == 'tube':
            self.masked_position_generator = TubeMaskingGenerator(
                args.window_size, args.mask_ratio
            )

    def __call__(self, images):
        process_data, _ = self.transform(images)
        return process_data, self.masked_position_generator()

    def __repr__(self):
        repr = "(DataAugmentationForVideoMAE,\n"
        repr += " transform = %s,\n" % str(self.transform)
        repr += " Masked position generator = %s,\n" % str(self.masked_position_generator)
        repr += ")"
        return repr


def build_pretraining_dataset(args):
    transform = DataAugmentationForVideoMAE(args)
    dataset = VideoMAE(
        root=None,
        setting=args.data_path,
        video_ext='mp4',
        is_color=True,
        modality='rgb',
        new_length=args.num_frames,
        new_step=args.sampling_rate,
        transform=transform,
        temporal_jitter=False,
        video_loader=True,
        use_decord=True,
        lazy_init=False)
    print("Data Aug = %s" % str(transform))
    return dataset


def build_dataset(is_train, test_mode, args):
    if args.data_set == 'Kinetics-400':
        mode = None
        anno_path = args.anno_path
        if is_train is True:
            mode = 'train'
            anno_path = os.path.join(args.anno_path, 'train.csv')
        elif test_mode is True:
            mode = 'test'
            anno_path = os.path.join(args.anno_path, 'val.csv')
        else:
            mode = 'validation'
            anno_path = os.path.join(args.anno_path, 'val.csv')
        dataset = VideoClsDataset(
            anno_path=anno_path,
            data_path=args.data_path,
            mode=mode,
            clip_len=args.num_frames,
            frame_sample_rate=args.sampling_rate,
            num_segment=1,
            test_num_segment=args.test_num_segment,
            test_num_crop=args.test_num_crop,
            num_crop=1 if not test_mode else 3,
            keep_aspect_ratio=True,
            crop_size=args.input_size,
            short_side_size=args.short_side_size,
            new_height=256,
            new_width=320,
            args=args)
        nb_classes = 400
    elif args.data_set == 'SSV2':
        mode = None
        anno_path = None
        if is_train is True:
            mode = 'train'
            anno_path = os.path.join(args.anno_path, 'train.csv')
        elif test_mode is True:
            mode = 'test'
            anno_path = os.path.join(args.anno_path, 'val.csv')
        else:
            mode = 'validation'
            anno_path = os.path.join(args.anno_path, 'val.csv')
next_line: dataset = SSVideoClsDataset(
gold_snippet_index: 3
created_at: 2023-10-25 07:07:05+00:00
level: 16k
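The DataAugmentationForVideoMAE transform in the row above pairs every pretraining clip with a tube mask produced by TubeMaskingGenerator. A minimal sketch of that masking step in plain NumPy, assuming VideoMAE-style defaults of window_size=(8, 14, 14) (16 frames with tubelet size 2, 224px input, 16px patches) and mask_ratio=0.9; both values normally come from args:

import numpy as np

frames, height, width = 8, 14, 14   # args.window_size (assumed defaults)
mask_ratio = 0.9                    # args.mask_ratio (assumed default)

num_patches_per_frame = height * width                          # 196
num_masks_per_frame = int(mask_ratio * num_patches_per_frame)   # 176

# One random per-frame mask, tiled across time, so masked patches form "tubes".
mask_per_frame = np.hstack([
    np.zeros(num_patches_per_frame - num_masks_per_frame),
    np.ones(num_masks_per_frame),
])
np.random.shuffle(mask_per_frame)
mask = np.tile(mask_per_frame, (frames, 1)).flatten()

print(mask.shape)       # (1568,) == frames * height * width
print(int(mask.sum()))  # 1408 == frames * num_masks_per_frame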
repo_name: OpenProteinAI/PoET
file_path: scripts/score.py
context:
[ { "identifier": "Uniprot21", "path": "poet/alphabets.py", "snippet": "class Uniprot21(Alphabet):\n def __init__(\n self,\n mask=False,\n include_gap=False,\n include_startstop=False,\n distinct_startstop=False,\n ):\n chars = b\"ARNDCQEGHILKMFPSTWYV\"\n gap_token = start_token = stop_token = -1\n if include_gap:\n chars = chars + b\"-\"\n gap_token = len(chars) - 1\n if include_startstop:\n chars = chars + b\"*\"\n start_token = stop_token = len(chars) - 1\n if distinct_startstop:\n chars = chars + b\"$\"\n stop_token = len(chars) - 1\n # add the synonym tokens\n mask_token = len(chars)\n chars = chars + b\"XOUBZ\"\n\n encoding = np.arange(len(chars))\n encoding[mask_token + 1 :] = [\n 11,\n 4,\n mask_token,\n mask_token,\n ] # encode 'OUBZ' as synonyms\n missing = mask_token\n\n super(Uniprot21, self).__init__(\n chars, encoding=encoding, mask=mask, missing=missing\n )\n\n self.gap_token = gap_token\n self.start_token = start_token\n self.stop_token = stop_token\n self.mask_token = mask_token" }, { "identifier": "parse_stream", "path": "poet/fasta.py", "snippet": "def parse_stream(f, comment=b\"#\", upper=True):\n name = None\n sequence = []\n for line in f:\n if line.startswith(comment):\n continue\n line = line.strip()\n if line.startswith(b\">\"):\n if name is not None:\n yield name, b\"\".join(sequence)\n name = line[1:]\n sequence = []\n else:\n if upper:\n sequence.append(line.upper())\n else:\n sequence.append(line)\n if name is not None:\n yield name, b\"\".join(sequence)" }, { "identifier": "PackedTensorSequences", "path": "poet/models/modules/packed_sequence.py", "snippet": "class PackedTensorSequences:\n def __init__(\n self,\n packed_tensor: torch.Tensor,\n positions: torch.Tensor,\n indices: Optional[torch.Tensor],\n cu_seqlens: torch.Tensor,\n cu_seqlens_cpu: torch.Tensor,\n max_s: Union[torch.Tensor, int],\n batch_size: Optional[int],\n to_paddedable: bool = True,\n ):\n \"\"\"\n If to_paddedable, indicies and batch_size must be set to values that allow this\n object to be correctly padded.\n \"\"\"\n if to_paddedable:\n assert batch_size is not None\n\n self.x = packed_tensor\n self.positions = positions\n self.indices = indices\n self.cu_seqlens = cu_seqlens\n self.cu_seqlens_cpu = cu_seqlens_cpu\n self.max_s = max_s\n self.batch_size = batch_size\n self.to_paddedable = to_paddedable\n\n @property\n def dtype(self):\n return self.x.dtype\n\n @property\n def is_cuda(self):\n return self.x.is_cuda\n\n @property\n def device(self):\n return self.x.device\n\n @staticmethod\n def pack_input(x: torch.Tensor, positions=None, key_padding_mask=None):\n b = x.size(0)\n s = x.size(1)\n if positions is None:\n positions = (\n torch.arange(s, dtype=torch.long, device=x.device)\n .unsqueeze(0)\n .expand(b, s)\n )\n if key_padding_mask is None:\n x_packed = x.reshape(b * s, -1)\n positions = positions.reshape(b * s)\n indices = None\n cu_seqlens = torch.arange(\n 0, (b + 1) * s, step=s, dtype=torch.int32, device=x.device\n )\n cu_seqlens_cpu = torch.arange(\n 0,\n (b + 1) * s,\n step=s,\n dtype=torch.int32,\n )\n max_s = s\n else:\n # flash attention padding function expects 1 for valid and 0 for invalid positions...\n key_padding_mask_bool = ~(key_padding_mask.bool())\n x_packed, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask_bool)\n cu_seqlens_cpu = cu_seqlens.cpu()\n positions, _, _, _ = unpad_input(\n positions.unsqueeze(2), key_padding_mask_bool\n )\n positions = positions.squeeze(1)\n return PackedTensorSequences(\n x_packed, positions, indices, cu_seqlens, 
cu_seqlens_cpu, max_s, b\n )\n\n def to_padded(self, return_mask=False, return_positions=False):\n if not self.to_paddedable:\n raise ValueError(\"Cannot be to_padded\")\n\n s = self.max_s\n b = self.batch_size\n mask = None\n x = self.x\n pos = self.positions\n if self.indices is None:\n # we are just a flattened matrix...\n x = x.view(b, s, *x.shape[1:])\n pos = pos.view(b, s)\n else:\n dims = None\n if x.ndim > 2:\n dims = x.shape[1:]\n x = x.view(x.size(0), -1)\n x, mask = pad_input(x, self.indices, b, s, return_mask=return_mask)\n pos, _ = pad_input(pos.unsqueeze(1), self.indices, b, s)\n pos = pos.squeeze(2)\n if dims is not None:\n x = x.view(x.size(0), x.size(1), *dims)\n\n if return_mask and return_positions:\n return x, mask, pos\n elif return_mask:\n return x, mask\n elif return_positions:\n return x, pos\n else:\n return x\n\n @staticmethod\n def compute_indices(seqlens: torch.Tensor):\n indices_mask = get_mask(seqlens)\n indices = torch.nonzero(~indices_mask.flatten(), as_tuple=False).flatten()\n return indices" }, { "identifier": "PoET", "path": "poet/models/poet.py", "snippet": "class PoET(nn.Module, LogitsAllocateMemoryMixin):\n def __init__(\n self,\n n_vocab: int,\n hidden_dim: int = 768,\n ff_dim: Optional[int] = None,\n num_layers: int = 6,\n nhead: int = 12,\n dropout: float = 0,\n use_multi_rotary: bool = True,\n norm: bool = False,\n mask_token: int = 21, # kept just to maintain compatability with old models\n ):\n super().__init__()\n self.n_vocab = n_vocab\n self.hidden_dim = hidden_dim\n self.dropout = dropout\n\n self.token_embed = nn.Embedding(n_vocab, hidden_dim)\n # kept just to maintain compatability with old models\n self.rotary_emb = RotaryEmbedding(hidden_dim // nhead)\n\n ff_dim = ff_dim or 4 * hidden_dim\n\n self.decoder = TransformerEncoder(\n encoder_layer=TieredRotaryTransformerEncoderLayer(\n d_model=hidden_dim,\n nhead=nhead,\n dim_feedforward=ff_dim,\n dropout=dropout,\n use_multi_rotary=use_multi_rotary,\n batch_first=True,\n causal=True,\n ),\n num_layers=num_layers,\n )\n\n if norm:\n self.norm = nn.LayerNorm(hidden_dim)\n else:\n self.norm = nn.Identity()\n\n self.linear = nn.Linear(hidden_dim, n_vocab)\n\n def embed(\n self,\n xs: torch.Tensor,\n segment_sizes: torch.Tensor,\n allow_cpu_offload: bool = False,\n pbar_position: Optional[int] = None,\n ) -> list[PackedTensorSequences]:\n \"\"\"\n Returns the memory of each layer in a list. The memory is the input to the\n multi-sequence attention.\n\n Args:\n xs:\n (B, L) sequence of sequences\n segment_sizes:\n (B, N) the lengths of each sequence in the sequence of sequences\n allow_cpu_offload:\n whether or not memory should be offloaded to cpu if CUDA OOMs\n pbar_position:\n position of a tqdm progress bar if not None\n\n Returns:\n The memory. 
If allow_cpu_offload and there is insufficient GPU memory to\n store the tensors, the tensors will be stored in CPU memory instead.\n \"\"\"\n seqs_seqlens = segment_sizes.sum(dim=1).type(torch.int32)\n xs, _, _, _ = unpad_input(xs.unsqueeze(2), ~get_mask(seqs_seqlens))\n xs = xs.squeeze(1)\n h = self.token_embed.forward(xs)\n\n segment_sizes_cpu = segment_sizes.cpu()\n seqs_seqlens_cpu = segment_sizes_cpu.sum(dim=1).type(torch.int32)\n nonzero_segment_sizes_cpu = (\n segment_sizes_cpu[segment_sizes_cpu > 0].flatten().type(torch.int32)\n )\n cu_seqlens_cpu = F.pad(\n nonzero_segment_sizes_cpu.cumsum(\n dim=0, dtype=nonzero_segment_sizes_cpu.dtype\n ),\n (1, 0),\n )\n cu_seqlens = cu_seqlens_cpu.to(xs.device)\n h = PackedTensorSequences(\n packed_tensor=h,\n positions=torch.cat(\n [\n torch.arange(segment_size, dtype=xs.dtype, device=xs.device)\n for segment_size in nonzero_segment_sizes_cpu\n ]\n ),\n cu_seqlens=cu_seqlens,\n cu_seqlens_cpu=cu_seqlens_cpu,\n max_s=nonzero_segment_sizes_cpu.max(),\n # only needed for unpadding (used in standard attn)\n to_paddedable=False,\n indices=None,\n batch_size=None,\n )\n\n memory = []\n output_device: Optional[torch.device] = None\n if pbar_position is None:\n layers = self.decoder.layers\n else:\n layers = tqdm(\n self.decoder.layers,\n desc=f\"[{pbar_position}] encoding\",\n leave=False,\n position=pbar_position,\n )\n for layer in layers:\n layer: TieredRotaryTransformerEncoderLayer\n try:\n h, (_, _), (key, value) = layer.forward(\n h,\n seqs_cu_seqlens=F.pad(\n seqs_seqlens.cumsum(dim=0, dtype=seqs_seqlens.dtype), (1, 0)\n ),\n seqs_cu_seqlens_cpu=F.pad(\n seqs_seqlens_cpu.cumsum(dim=0, dtype=seqs_seqlens.dtype),\n (1, 0),\n ),\n return_memory=True,\n )\n if output_device is not None:\n key.x = key.x.to(output_device)\n value.x = value.x.to(output_device)\n except RuntimeError as e:\n if \"CUDA out of memory\" in str(e) and allow_cpu_offload:\n if pbar_position is not None:\n tqdm.write(\n \"OOMed during encoding, retrying by offloading to cpu\"\n )\n torch.cuda.empty_cache()\n output_device = torch.device(\"cpu\")\n for this_memory in memory:\n this_memory.x = this_memory.x.to(output_device)\n torch.cuda.empty_cache()\n h, (_, _), (key, value) = layer.forward(\n h,\n seqs_cu_seqlens=F.pad(\n seqs_seqlens.cumsum(dim=0, dtype=seqs_seqlens.dtype), (1, 0)\n ),\n seqs_cu_seqlens_cpu=F.pad(\n seqs_seqlens_cpu.cumsum(dim=0, dtype=seqs_seqlens.dtype),\n (1, 0),\n ),\n return_memory=True,\n )\n key.x = key.x.to(output_device)\n value.x = value.x.to(output_device)\n else:\n raise e\n memory.append(key)\n memory.append(value)\n return memory\n\n def logits(\n self,\n x: torch.Tensor,\n memory: Optional[list[PackedTensorSequences]],\n preallocated_memory: bool = False,\n return_embeddings: bool = False,\n ) -> Union[torch.Tensor, tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"\n Compute the next token probability distributions given a precomputed memory\n (see self.embed and/or self.logits_allocate_memory).\n\n Args\n x:\n (B, L) sequence of sequences of tokens\n memory:\n output of self.embed\n if not preallocated_memory, has batch size 1 (it will be expanded if necessary)\n if memory is not on the same device as x, a copy of memory will be made to the\n device of x as necessary\n preallocated_memory:\n whether or not additional memory needed for this method was preallocated\n using self.logits_allocate_memory\n\n Returns:\n logits:\n (B, L, V) logits of the next token probability distributions. 
Here, V is\n the vocabulary size\n \"\"\"\n B, L_x = x.size()\n\n x: PackedTensorSequences = PackedTensorSequences.pack_input(x.unsqueeze(2))\n x.x = self.token_embed.forward(x.x.squeeze(1))\n\n x = _apply_causal_prefix_attention(\n decoder=self.decoder,\n x=x,\n batch_size=B,\n length=L_x,\n self_memory=None,\n memory=memory,\n preallocated_memory=preallocated_memory,\n )\n\n embeddings = self.norm(x.x)\n logits = self.linear.forward(embeddings).view(B, L_x, -1)\n if not return_embeddings:\n return logits\n else:\n return logits, embeddings.view(B, L_x, -1)\n\n def sample(\n self,\n xs: torch.Tensor,\n segment_sizes: torch.Tensor,\n temperature: float = 1,\n top_k: Optional[int] = None,\n top_p: Optional[float] = None,\n maxlen: int = 1000,\n alphabet: Uniprot21 = Uniprot21(\n include_gap=True, include_startstop=True, distinct_startstop=True\n ),\n remove_invalid: bool = True,\n batch_size: int = 1,\n ) -> tuple[torch.Tensor, float]:\n \"\"\"Sample batch_size sequences.\n\n Note: this implementation is out of date\n \"\"\"\n return self.sample_given_memory(\n memory=self.embed(xs, segment_sizes),\n temperature=temperature,\n top_k=top_k,\n top_p=top_p,\n maxlen=maxlen,\n alphabet=alphabet,\n remove_invalid=remove_invalid,\n batch_size=batch_size,\n )\n\n @torch.inference_mode()\n def sample_given_memory(\n self,\n memory: Optional[list[PackedTensorSequences]],\n temperature: float = 1,\n top_k: Optional[int] = None,\n top_p: Optional[float] = None,\n maxlen: int = 1000,\n alphabet: Uniprot21 = Uniprot21(\n include_gap=True, include_startstop=True, distinct_startstop=True\n ),\n remove_invalid: bool = True,\n batch_size: int = 1,\n ) -> tuple[list[torch.Tensor], torch.Tensor]:\n \"\"\"Sample batch_size sequences from memory.\n\n Assumes memory represents one prompt, and samples each sequence from that one\n prompt.\n\n Note: this implementation is out of date\n\n Args:\n memory:\n Output of self.embed\n Must only describe one sequence of sequences i.e. 
have a batch size of 1\n temperature:\n Controls the randomness of the sampling by dividing the logits\n top_k:\n Controls the number of most probable tokens to consider at each step of\n sampling\n Default is None, which means all tokens are considered\n top_p:\n Controls the cumulative probability of the most probable tokens to consider\n at each step of sampling as in nucleus sampling\n Default is None, which is equivalent to the behavior with top_p=1\n maxlen:\n Maximum sequence length to sample, not including start and stop tokens\n Thus, returned sequences with have length up to maxlen+2, where the first\n token is the start token, and the last token is the stop token if the\n sequence terminates within maxlen tokens.\n alphabet:\n The alphabet encoding the sequence.\n remove_invalid:\n Whether or not to avoid sampling non-amino acids within a sequence.\n batch_size:\n Number of sequences to sample in parallel\n\n Returns:\n A tuple (sample_xs, sample_scores), where sample_xs is a list containing the\n sampled sequences as tensors encoded by alphabet, and sample_scores is a\n tensor containing the negative log likelihood of each sampled sequence.\n \"\"\"\n criteria = nn.CrossEntropyLoss(\n ignore_index=alphabet.mask_token, reduction=\"none\"\n )\n device = next(self.parameters()).device\n dtype = next(self.parameters()).dtype\n invalid_tokens = torch.tensor(\n [alphabet.mask_token, alphabet.start_token, alphabet.gap_token],\n device=device,\n )\n nhead = self.decoder.layers[0].num_heads\n head_dim = self.decoder.layers[0].dim // nhead\n\n # initialize memory buffer\n buffer_size = (batch_size, maxlen + 2, nhead, head_dim)\n self_buffer = [\n torch.empty(buffer_size, device=device, dtype=dtype)\n for _ in range(2 * len(self.decoder.layers))\n ]\n buffer = [\n torch.empty(buffer_size, device=device, dtype=dtype)\n for _ in range(2 * len(self.decoder.layers))\n ]\n\n # initialize x\n current_token = (\n torch.ones((batch_size, 1), dtype=torch.long, device=device)\n * alphabet.start_token\n )\n current_x = current_token\n current_position = torch.zeros((batch_size, 1), dtype=torch.long, device=device)\n current_position_int = 0\n current_logits: Optional[torch.Tensor] = None\n\n # sample rest of x\n sampled_xs, sampled_scores = [], []\n while True:\n # get logits for current x\n x: PackedTensorSequences = PackedTensorSequences.pack_input(\n current_token.unsqueeze(2),\n positions=current_position,\n )\n x.x = self.token_embed.forward(x.x.squeeze(1))\n x = _apply_causal_prefix_attention_buffered(\n decoder=self.decoder,\n x=x,\n memory=memory,\n self_buffer=[buf[:, : current_position_int + 1] for buf in self_buffer],\n buffer=[buf[:, : current_position_int + 1] for buf in buffer],\n )\n embeddings = self.norm(x.x)\n logits = self.linear.forward(embeddings).unsqueeze(1)\n\n # sample the next token\n next_token_logits = logits[:, -1].log_softmax(dim=1)\n if remove_invalid:\n next_token_logits[:, invalid_tokens] += -torch.inf\n next_token_logits /= temperature\n next_token_logits = top_k_top_p_filtering(\n next_token_logits, top_k=top_k, top_p=top_p\n )\n next_token = torch.multinomial(\n next_token_logits.float().softmax(dim=-1), 1\n ).flatten()\n\n # update state\n current_token = next_token.unsqueeze(1)\n current_x = torch.cat([current_x, current_token], dim=1)\n current_position = current_position + 1\n current_position_int += 1\n if current_logits is None:\n current_logits = logits\n else:\n current_logits = torch.cat([current_logits, logits], dim=1)\n\n # apply sampling termination 
conditions\n is_stop_batch_filter = (\n (next_token == alphabet.stop_token)\n if current_x.size(1) < maxlen + 2\n else torch.ones((current_x.size(0),), dtype=torch.bool, device=device)\n )\n if is_stop_batch_filter.sum() > 0:\n is_stop_batch_idxs = torch.where(is_stop_batch_filter)[0]\n not_is_stop_batch_idxs = torch.where(~is_stop_batch_filter)[0]\n\n sampled_xs.extend(current_x[is_stop_batch_idxs].unbind())\n sampled_scores.append(\n -criteria.forward(\n current_logits[is_stop_batch_idxs].transpose(1, 2),\n current_x[is_stop_batch_idxs, 1:].cuda(),\n )\n .float()\n .sum(dim=1)\n )\n if is_stop_batch_idxs.numel() == current_x.size(0):\n break\n else:\n # remove terminated sequences from state\n _filter = not_is_stop_batch_idxs\n current_token = current_token[_filter]\n current_x = current_x[_filter]\n current_position = current_position[_filter]\n current_logits = current_logits[_filter]\n for idx in range(len(self_buffer)):\n self_buffer[idx] = self_buffer[idx][_filter]\n for idx in range(len(buffer)):\n buffer[idx] = buffer[idx][_filter]\n return sampled_xs, torch.hstack(sampled_scores)\n\n @torch.inference_mode()\n def sample_given_memories(\n self,\n memory: list[PackedTensorSequences],\n temperature: float = 1,\n top_k: Optional[int] = None,\n top_p: Optional[float] = None,\n maxlen: int = 1000,\n alphabet: Uniprot21 = Uniprot21(\n include_gap=True, include_startstop=True, distinct_startstop=True\n ),\n remove_invalid: bool = True,\n ) -> tuple[list[torch.Tensor], torch.Tensor]:\n \"\"\"Sample one sequence for each prompt described by memory.\n\n Unlike self.sample_given_memory, memory can represent multiple prompts.\n\n This method may have higher memory requirements than self.sample_given_memory\n and self.sample_given_memories_ensemble. Roughly speaking, it may allocate\n additional memory equal to the total memory used by `memory`, whereas the other\n methods may allocate additional memory equal to the memory used by only two\n items in `memory` e.g. memory[0] and memory[1].\n\n Note: this implementation is out of date\n\n Args:\n memory:\n Output of self.embed\n temperature:\n Controls the randomness of the sampling by dividing the logits\n top_k:\n Controls the number of most probable tokens to consider at each step of\n sampling\n Default is None, which means all tokens are considered\n top_p:\n Controls the cumulative probability of the most probable tokens to consider\n at each step of sampling as in nucleus sampling\n Default is None, which is equivalent to the behavior with top_p=1\n maxlen:\n Maximum sequence length to sample, not including start and stop tokens\n Thus, returned sequences with have length up to maxlen+2, where the first\n token is the start token, and the last token is the stop token if the\n sequence terminates within maxlen tokens.\n alphabet:\n The alphabet encoding the sequence.\n remove_invalid:\n Whether or not to avoid sampling non-amino acids within a sequence.\n\n Returns:\n A tuple (sample_xs, sample_scores), where sample_xs is a list containing the\n sampled sequences as tensors encoded by alphabet, and sample_scores is a\n tensor containing the negative log likelihood of each sampled sequence.\n\n The order of the samples corresponds to the order of the prompts i.e. 
the nth\n sample in sample_xs/sample_scores is sampled from the nth prompt in memory.\n \"\"\"\n criteria = nn.CrossEntropyLoss(\n ignore_index=alphabet.mask_token, reduction=\"none\"\n )\n device = next(self.parameters()).device\n dtype = next(self.parameters()).dtype\n invalid_tokens = torch.tensor(\n [alphabet.mask_token, alphabet.start_token, alphabet.gap_token],\n device=device,\n )\n batch_size = memory[0].cu_seqlens.numel() - 1\n nhead = self.decoder.layers[0].num_heads\n head_dim = self.decoder.layers[0].dim // nhead\n\n # initialize memory buffer\n buffer_size = (batch_size, maxlen + 2, nhead, head_dim)\n self_buffer = [\n torch.empty(buffer_size, device=device, dtype=dtype)\n for _ in range(2 * len(self.decoder.layers))\n ]\n buffer = [\n torch.empty(buffer_size, device=device, dtype=dtype)\n for _ in range(2 * len(self.decoder.layers))\n ]\n\n # initialize x\n current_token = (\n torch.ones((batch_size, 1), dtype=torch.long, device=device)\n * alphabet.start_token\n )\n current_x = current_token\n current_position = torch.zeros((batch_size, 1), dtype=torch.long, device=device)\n current_position_int = 0\n current_logits: Optional[torch.Tensor] = None\n\n # sample rest of x\n sampled_xs, sampled_scores = [], []\n sampled_order, remaining_order = [], torch.arange(batch_size, device=device)\n while True:\n # get logits for current x\n x: PackedTensorSequences = PackedTensorSequences.pack_input(\n current_token.unsqueeze(2),\n positions=current_position,\n )\n x.x = self.token_embed.forward(x.x.squeeze(1))\n B = x.cu_seqlens.numel() - 1\n x = _apply_causal_prefix_attention_buffered(\n decoder=self.decoder,\n x=x,\n memory=memory,\n self_buffer=[buf[:, : current_position_int + 1] for buf in self_buffer],\n buffer=[buf[:, : current_position_int + 1] for buf in buffer],\n )\n embeddings = self.norm(x.x)\n logits = self.linear.forward(embeddings).view(B, 1, -1)\n\n # sample the next token\n next_token_logits = logits[:, -1].log_softmax(dim=1)\n if remove_invalid:\n next_token_logits[:, invalid_tokens] += -torch.inf\n next_token_logits /= temperature\n next_token_logits = top_k_top_p_filtering(\n next_token_logits, top_k=top_k, top_p=top_p\n )\n next_token = torch.multinomial(\n next_token_logits.float().softmax(dim=-1), 1\n ).flatten()\n\n # update state\n current_token = next_token.unsqueeze(1)\n current_x = torch.cat([current_x, current_token], dim=1)\n current_position = current_position + 1\n current_position_int += 1\n if current_logits is None:\n current_logits = logits\n else:\n current_logits = torch.cat([current_logits, logits], dim=1)\n\n # apply sampling termination conditions\n is_stop_batch_filter = (\n (next_token == alphabet.stop_token)\n if current_x.size(1) < maxlen + 2\n else torch.ones((current_x.size(0),), dtype=torch.bool, device=device)\n )\n if is_stop_batch_filter.sum() > 0:\n is_stop_batch_idxs = torch.where(is_stop_batch_filter)[0]\n not_is_stop_batch_idxs = torch.where(~is_stop_batch_filter)[0]\n not_is_stop_batch_idxs_cpu = not_is_stop_batch_idxs.cpu()\n\n sampled_order.append(remaining_order[is_stop_batch_idxs])\n remaining_order = remaining_order[not_is_stop_batch_idxs]\n sampled_xs.extend(current_x[is_stop_batch_idxs].unbind())\n sampled_scores.append(\n -criteria.forward(\n current_logits[is_stop_batch_idxs].transpose(1, 2),\n current_x[is_stop_batch_idxs, 1:].cuda(),\n )\n .float()\n .sum(dim=1)\n )\n if is_stop_batch_idxs.numel() == current_x.size(0):\n break\n else:\n # remove terminated sequences from state\n _filter = not_is_stop_batch_idxs\n 
_filter_cpu = not_is_stop_batch_idxs_cpu\n current_token = current_token[_filter]\n current_x = current_x[_filter]\n current_position = current_position[_filter]\n current_logits = current_logits[_filter]\n for idx in range(len(self_buffer)):\n self_buffer[idx] = self_buffer[idx][_filter]\n for idx in range(len(buffer)):\n buffer[idx] = buffer[idx][_filter]\n\n new_start_idxs = memory[0].cu_seqlens_cpu[:-1][_filter_cpu]\n new_end_idxs = memory[0].cu_seqlens_cpu[1:][_filter_cpu]\n filtered_idxs = torch.hstack(\n [\n torch.arange(\n new_start_idxs[idx], new_end_idxs[idx], device=device\n )\n for idx in range(_filter.numel())\n ]\n )\n memory = [copy.copy(mem) for mem in memory]\n for mem in memory:\n mem.x = mem.x[filtered_idxs]\n mem.positions = mem.positions[filtered_idxs]\n mem.cu_seqlens = F.pad(\n mem.cu_seqlens.diff()[_filter].cumsum(\n dim=0, dtype=torch.int32\n ),\n (1, 0),\n )\n mem.cu_seqlens_cpu = F.pad(\n mem.cu_seqlens_cpu.diff()[_filter_cpu].cumsum(\n dim=0, dtype=torch.int32\n ),\n (1, 0),\n )\n mem.max_s = mem.cu_seqlens_cpu.diff().max()\n mem.to_paddedable = False\n\n # order sampled sequences by the order of the input memories\n sampled_order = torch.hstack(sampled_order).argsort()\n sampled_xs = [sampled_xs[i] for i in sampled_order]\n sampled_scores = torch.hstack(sampled_scores)[sampled_order]\n return sampled_xs, sampled_scores\n\n @torch.inference_mode()\n def sample_given_memories_ensemble(\n self,\n memory: list[PackedTensorSequences],\n temperature: float = 1,\n top_k: Optional[int] = None,\n top_p: Optional[float] = None,\n maxlen: int = 1000,\n alphabet: Uniprot21 = Uniprot21(\n include_gap=True, include_startstop=True, distinct_startstop=True\n ),\n remove_invalid: bool = True,\n ) -> tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Sample one sequence by ensembling the prompts described by memory.\n\n Note: this implementation is out of date\n\n Args:\n memory:\n Output of self.embed\n temperature:\n Controls the randomness of the sampling by dividing the logits\n top_k:\n Controls the number of most probable tokens to consider at each step of\n sampling\n Default is None, which means all tokens are considered\n top_p:\n Controls the cumulative probability of the most probable tokens to consider\n at each step of sampling as in nucleus sampling\n Default is None, which is equivalent to the behavior with top_p=1\n maxlen:\n Maximum sequence length to sample, not including start and stop tokens\n Thus, returned sequences with have length up to maxlen+2, where the first\n token is the start token, and the last token is the stop token if the\n sequence terminates within maxlen tokens.\n alphabet:\n The alphabet encoding the sequence.\n remove_invalid:\n Whether or not to avoid sampling non-amino acids within a sequence.\n\n Returns:\n A tuple (sample_x, sample_scores), where sample_x is the sampled sequence\n encoded by alphabet, and sample_scores is a tensor containing the negative\n log likelihood of sample_x conditioned on each prompt in memory.\n \"\"\"\n device = next(self.parameters()).device\n dtype = next(self.parameters()).dtype\n invalid_tokens = torch.tensor(\n [alphabet.mask_token, alphabet.start_token, alphabet.gap_token],\n device=device,\n )\n batch_size = memory[0].cu_seqlens.numel() - 1\n nhead = self.decoder.layers[0].num_heads\n head_dim = self.decoder.layers[0].dim // nhead\n\n # initialize memory buffer\n buffer_size = (batch_size, maxlen + 2, nhead, head_dim)\n self_buffer = [\n torch.empty(buffer_size, device=device, dtype=dtype)\n for _ in range(2 
* len(self.decoder.layers))\n ]\n buffer = [\n torch.empty(buffer_size, device=device, dtype=dtype)\n for _ in range(2 * len(self.decoder.layers))\n ]\n\n # initialize x\n current_token = (\n torch.ones((batch_size, 1), dtype=torch.long, device=device)\n * alphabet.start_token\n )\n current_x = current_token\n current_position = torch.zeros((batch_size, 1), dtype=torch.long, device=device)\n current_position_int = 0\n current_logits_sum = torch.zeros(\n (batch_size,), dtype=torch.float32, device=device\n )\n\n # sample rest of x\n while True:\n # get logits for current x\n x: PackedTensorSequences = PackedTensorSequences.pack_input(\n current_token.unsqueeze(2),\n positions=current_position,\n )\n x.x = self.token_embed.forward(x.x.squeeze(1))\n B = x.cu_seqlens.numel() - 1\n x = _apply_causal_prefix_attention_buffered(\n decoder=self.decoder,\n x=x,\n memory=memory,\n self_buffer=[buf[:, : current_position_int + 1] for buf in self_buffer],\n buffer=[buf[:, : current_position_int + 1] for buf in buffer],\n )\n embeddings = self.norm(x.x)\n logits = self.linear.forward(embeddings).view(B, 1, -1)\n\n # sample the next token\n next_token_logits = logits[:, -1].log_softmax(dim=1)\n weights = current_logits_sum.softmax(dim=0)\n per_memory_next_token_logits = next_token_logits\n next_token_logits = (next_token_logits * weights.unsqueeze(1)).sum(dim=0)\n if remove_invalid:\n next_token_logits[invalid_tokens] += -torch.inf\n next_token_logits /= temperature\n next_token_logits = top_k_top_p_filtering(\n next_token_logits.unsqueeze(0), top_k=top_k, top_p=top_p\n ).squeeze(0)\n next_token = torch.multinomial(next_token_logits.float().softmax(dim=-1), 1)\n\n # update state\n current_token = next_token.unsqueeze(0).expand(batch_size, -1)\n current_x = torch.cat([current_x, current_token], dim=1)\n current_position = current_position + 1\n current_position_int += 1\n current_logits_sum += per_memory_next_token_logits[:, next_token].flatten()\n\n # apply sampling termination conditions\n if next_token == alphabet.stop_token or current_x.size(1) == maxlen + 2:\n return current_x[0], current_logits_sum\n\n def forward(self, xs: torch.Tensor, segment_sizes: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Compute the next token probability distributions.\n\n Examples:\n Example input with batch size 1\n\n xs: [$ A B * $ A B C * $ E F]\n segment_sizes: [[4, 5, 3]]\n\n Note that the last sequence in a sequence of sequences does not need to have a\n stop token.\n\n Args:\n xs:\n (B, L) sequence of sequences of tokens\n segment_sizes:\n (B, N) the lengths of each sequence in the sequence of sequences\n\n Returns:\n (B, L, V) logits of the next token probability distributions. 
Here, V is\n the vocabulary size\n\n \"\"\"\n B, L = xs.size()\n\n seqs_seqlens = segment_sizes.sum(dim=1).type(torch.int32)\n xs, indices, _, _ = unpad_input(xs.unsqueeze(2), ~get_mask(seqs_seqlens))\n xs = xs.squeeze(1)\n h = self.token_embed.forward(xs)\n\n segment_sizes_cpu = segment_sizes.cpu()\n seqs_seqlens_cpu = segment_sizes_cpu.sum(dim=1).type(torch.int32)\n nonzero_segment_sizes_cpu = (\n segment_sizes_cpu[segment_sizes_cpu > 0].flatten().type(torch.int32)\n )\n cu_seqlens_cpu = F.pad(\n nonzero_segment_sizes_cpu.cumsum(\n dim=0, dtype=nonzero_segment_sizes_cpu.dtype\n ),\n (1, 0),\n )\n cu_seqlens = cu_seqlens_cpu.to(xs.device)\n h = PackedTensorSequences(\n packed_tensor=h,\n positions=torch.cat(\n [\n torch.arange(segment_size, dtype=xs.dtype, device=xs.device)\n for segment_size in nonzero_segment_sizes_cpu\n ]\n ),\n cu_seqlens=cu_seqlens,\n cu_seqlens_cpu=cu_seqlens_cpu,\n max_s=nonzero_segment_sizes_cpu.max(),\n # only needed for unpadding (used in standard attn)\n to_paddedable=False,\n indices=None,\n batch_size=None,\n )\n h = self.decoder.forward(\n h,\n seqs_cu_seqlens=F.pad(\n seqs_seqlens.cumsum(dim=0, dtype=seqs_seqlens.dtype), (1, 0)\n ),\n seqs_cu_seqlens_cpu=F.pad(\n seqs_seqlens_cpu.cumsum(dim=0, dtype=seqs_seqlens_cpu.dtype),\n (1, 0),\n ),\n )\n\n logits = self.linear.forward(self.norm(h.x))\n logits, _ = pad_input(logits, indices, B, L) # (B,L,num_tokens)\n return logits" }, { "identifier": "MSASampler", "path": "poet/msa/sampling.py", "snippet": "class MSASampler(BaseModel):\n # TODO: refactor msa sampling code...\n method: Union[TopSampler, RandomSampler, NeighborsSampler] = Field(\n ..., discriminator=\"sampler_type\"\n )\n force_include_first: bool = False\n max_similarity: float = 1.0\n max_dissimilarity: float = 1.0\n\n def _get_sim_filtered_idxs(self, msa: np.ndarray) -> np.ndarray:\n nonnormalized_sim = (msa == msa[[0]]).sum(axis=1)\n normfactor = msa.shape[1]\n norm_sim = nonnormalized_sim / normfactor\n\n assert (norm_sim.min() >= 0) and (norm_sim.max() <= 1)\n dsim = 1 - norm_sim\n\n max_sim_filter = norm_sim <= self.max_similarity\n max_dissim_filter = dsim <= self.max_dissimilarity\n return np.where(max_sim_filter & max_dissim_filter)[0]\n\n def get_sample_idxs(\n self,\n msa: np.ndarray,\n gap_token: int,\n seed: Optional[int] = None,\n result_cache_dir: Optional[Path] = None,\n ) -> np.ndarray:\n _, weights = self.method.get_weights(\n msa=msa, gap_token=gap_token, result_cache_dir=result_cache_dir\n )\n\n original_msa_sample_idxs = np.arange(len(msa))\n sample_idxs = self._get_sim_filtered_idxs(msa)\n original_msa_sample_idxs = original_msa_sample_idxs[sample_idxs]\n msa = msa[sample_idxs]\n weights = weights[sample_idxs]\n\n sample_idxs = self.method.get_sample_idxs(msa=msa, weights=weights, seed=seed)\n original_msa_sample_idxs = original_msa_sample_idxs[sample_idxs]\n del msa, weights\n\n if self.force_include_first:\n original_msa_sample_idxs = np.concatenate(\n [[0], original_msa_sample_idxs[original_msa_sample_idxs != 0]]\n )\n return original_msa_sample_idxs" }, { "identifier": "NeighborsSampler", "path": "poet/msa/sampling.py", "snippet": "class NeighborsSampler(BaseModel):\n sampler_type: Literal[\"neighbors\"] = \"neighbors\"\n theta: float = 0.2\n can_use_torch: bool = True\n\n def get_weights(\n self, msa: np.ndarray, gap_token: int, result_cache_dir: Optional[Path] = None\n ) -> tuple[Optional[float], Optional[np.ndarray]]:\n assert msa.dtype == np.uint8\n return compute_homology_weights(\n ungapped_msa=msa,\n 
theta=self.theta,\n gap_token=gap_token,\n gap_token_mask=255,\n result_cache_dir=result_cache_dir,\n can_use_torch=self.can_use_torch,\n )\n\n def get_sample_idxs(\n self,\n msa: np.ndarray,\n weights: Optional[np.ndarray] = None,\n seed: Optional[int] = None,\n ) -> np.ndarray:\n assert weights is not None\n if len(msa) == 0:\n return np.array([], dtype=int)\n size = len(msa)\n rng = np.random.default_rng(seed) if seed is not None else np.random\n return rng.choice(len(msa), replace=False, size=size, p=weights / weights.sum())" } ]
import argparse import itertools import string import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from pathlib import Path from typing import Callable, Optional, Sequence, TypeVar from torch.nn.utils.rnn import pad_sequence from tqdm import tqdm, trange from poet.alphabets import Uniprot21 from poet.fasta import parse_stream from poet.models.modules.packed_sequence import PackedTensorSequences from poet.models.poet import PoET from poet.msa.sampling import MSASampler, NeighborsSampler
11936
batch_first=True, padding_value=alphabet.mask_token, ) if this_variants.size(1) < max_variant_length: this_variants = F.pad( this_variants, (0, max_variant_length - this_variants.size(1)), value=alphabet.mask_token, ) assert (this_variants == alphabet.gap_token).sum() == 0 this_variants = this_variants.cuda() logits = model.logits(this_variants[:, :-1], memory, preallocated_memory=True) targets = this_variants[:, 1:] score = -criteria.forward(logits.transpose(1, 2), targets).float().sum(dim=1) logps.append(score.cpu().numpy()) return np.hstack(logps) def get_logps_tiered_fast( msa_sequences: Sequence[np.ndarray], variants: Sequence[np.ndarray], model: PoET, batch_size: int, alphabet: Uniprot21, pbar_position: Optional[int] = None, ) -> np.ndarray: if len(msa_sequences) > 0: segment_sizes = torch.tensor([len(s) for s in msa_sequences]).cuda() msa_sequences: torch.Tensor = torch.cat( [torch.from_numpy(s).long() for s in msa_sequences] ).cuda() memory = model.embed( msa_sequences.unsqueeze(0), segment_sizes.unsqueeze(0), pbar_position=pbar_position, ) else: memory = None return _get_logps_tiered_fast( memory=memory, variants=variants, model=model, batch_size=batch_size, alphabet=alphabet, pbar_position=pbar_position, ) def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--ckpt_path", type=str, default="data/poet.ckpt") parser.add_argument( "--msa_a3m_path", type=str, default="data/BLAT_ECOLX_ColabFold_2202.a3m" ) parser.add_argument( "--variants_fasta_path", type=str, default="data/BLAT_ECOLX_Jacquier_2013_variants.fasta", ) parser.add_argument( "--output_npy_path", type=str, default="data/BLAT_ECOLX_Jacquier_2013_variants.npy", ) parser.add_argument("--batch_size", type=int, default=8) parser.add_argument("--seed", type=int, default=188257) parser.add_argument( "--debug", action="store_true", help="run only 1/15 params from the msa sampling and filtering ensemble", ) args = parser.parse_args() args.msa_a3m_path = Path(args.msa_a3m_path) args.variants_fasta_path = Path(args.variants_fasta_path) args.output_npy_path = Path(args.output_npy_path) return args @torch.inference_mode() def main(): args = parse_args() # load model ckpt = torch.load(args.ckpt_path) model = PoET(**ckpt["hyper_parameters"]["model_spec"]["init_args"]) model.load_state_dict( {k.split(".", 1)[1]: v for k, v in ckpt["state_dict"].items()} ) del ckpt model = model.cuda().half().eval() alphabet = Uniprot21( include_gap=True, include_startstop=True, distinct_startstop=True ) jit_warmup(model, alphabet) # get variants to score variants = [ append_startstop(alphabet.encode(v), alphabet=alphabet) for v in get_seqs_from_fastalike(args.variants_fasta_path) ] # process msa msa_sequences = get_seqs_from_fastalike(args.msa_a3m_path) msa = get_encoded_msa_from_a3m_seqs(msa_sequences=msa_sequences, alphabet=alphabet) # score the variants logps = [] if not args.debug: params = list( itertools.product( [6144, 12288, 24576], [1.0, 0.95, 0.90, 0.70, 0.50], ) ) else: params = [(12288, 0.95)] for max_tokens, max_similarity in tqdm(params, desc="ensemble"): sampler = MSASampler(
ASCII_LOWERCASE_BYTES = string.ascii_lowercase.encode() PBAR_POSITION = 1 T = TypeVar("T", np.ndarray, torch.Tensor) def append_startstop(x: T, alphabet: Uniprot21) -> T: x_ndim = x.ndim assert x_ndim in {1, 2} if x_ndim == 1: x = x[None, :] if isinstance(x, torch.Tensor): empty_func = torch.empty else: empty_func = np.empty x_ = empty_func((x.shape[0], x.shape[1] + 2), dtype=x.dtype) x_[:, 0] = alphabet.start_token x_[:, -1] = alphabet.stop_token x_[:, 1:-1] = x if x_ndim == 1: x_ = x_.flatten() return x_ def get_seqs_from_fastalike(filepath: Path) -> list[bytes]: return [s for _, s in parse_stream(open(filepath, "rb"), upper=False)] def get_encoded_msa_from_a3m_seqs( msa_sequences: list[bytes], alphabet: Uniprot21 ) -> np.ndarray: return np.vstack( [ alphabet.encode(s.translate(None, delete=ASCII_LOWERCASE_BYTES)) for s in msa_sequences ] ) def sample_msa_sequences( get_sequence_fn: Callable[[int], bytes], sample_idxs: Sequence[int], max_tokens: int, alphabet: Uniprot21, shuffle: bool = True, shuffle_seed: Optional[int] = None, truncate: bool = True, ) -> list[np.ndarray]: assert alphabet.start_token != -1 assert alphabet.stop_token != -1 if not shuffle: assert shuffle_seed is None seqs, total_tokens = [], 0 for idx in sample_idxs: next_sequence = get_sequence_fn(idx) seqs.append(append_startstop(alphabet.encode(next_sequence), alphabet=alphabet)) total_tokens += len(seqs[-1]) if total_tokens > max_tokens: break # shuffle order and truncate to max tokens if shuffle: rng = ( np.random.default_rng(shuffle_seed) if shuffle_seed is not None else np.random ) final_permutation = rng.permutation(len(seqs)) else: final_permutation = np.arange(len(seqs)) final_seqs, total_tokens = [], 0 for seq in [seqs[i] for i in final_permutation]: if truncate and (total_tokens + len(seq) > max_tokens): seq = seq[: max_tokens - total_tokens] total_tokens += len(seq) final_seqs.append(seq) if total_tokens >= max_tokens: break return final_seqs def jit_warmup(embedding_model: PoET, alphabet: Uniprot21): x = b"$WAAAGH*$WAAGW*" segment_sizes = [8, 7] x = alphabet.encode(x) # encode x into the uniprot21 alphabet x = torch.from_numpy(x).long().cuda() segment_sizes = torch.tensor(segment_sizes).long().cuda() _ = embedding_model.embed(x.unsqueeze(0), segment_sizes.unsqueeze(0)) def _get_logps_tiered_fast( memory: Optional[list[PackedTensorSequences]], variants: Sequence[np.ndarray], model: PoET, batch_size: int, alphabet: Uniprot21, pbar_position: Optional[int] = None, ) -> np.ndarray: max_variant_length = max(len(v) for v in variants) memory = model.logits_allocate_memory( memory=memory, batch_size=batch_size, length=max_variant_length - 1, # discount stop token ) criteria = nn.CrossEntropyLoss(ignore_index=alphabet.mask_token, reduction="none") logps = [] if pbar_position is not None: pbar = trange( 0, len(variants), batch_size, desc=f"[{pbar_position}] decoding", leave=False, position=pbar_position, ) else: pbar = range(0, len(variants), batch_size) for start_idx in pbar: this_variants = variants[start_idx : start_idx + batch_size] this_variants = pad_sequence( [torch.from_numpy(v).long() for v in this_variants], batch_first=True, padding_value=alphabet.mask_token, ) if this_variants.size(1) < max_variant_length: this_variants = F.pad( this_variants, (0, max_variant_length - this_variants.size(1)), value=alphabet.mask_token, ) assert (this_variants == alphabet.gap_token).sum() == 0 this_variants = this_variants.cuda() logits = model.logits(this_variants[:, :-1], memory, preallocated_memory=True) targets = 
this_variants[:, 1:] score = -criteria.forward(logits.transpose(1, 2), targets).float().sum(dim=1) logps.append(score.cpu().numpy()) return np.hstack(logps) def get_logps_tiered_fast( msa_sequences: Sequence[np.ndarray], variants: Sequence[np.ndarray], model: PoET, batch_size: int, alphabet: Uniprot21, pbar_position: Optional[int] = None, ) -> np.ndarray: if len(msa_sequences) > 0: segment_sizes = torch.tensor([len(s) for s in msa_sequences]).cuda() msa_sequences: torch.Tensor = torch.cat( [torch.from_numpy(s).long() for s in msa_sequences] ).cuda() memory = model.embed( msa_sequences.unsqueeze(0), segment_sizes.unsqueeze(0), pbar_position=pbar_position, ) else: memory = None return _get_logps_tiered_fast( memory=memory, variants=variants, model=model, batch_size=batch_size, alphabet=alphabet, pbar_position=pbar_position, ) def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--ckpt_path", type=str, default="data/poet.ckpt") parser.add_argument( "--msa_a3m_path", type=str, default="data/BLAT_ECOLX_ColabFold_2202.a3m" ) parser.add_argument( "--variants_fasta_path", type=str, default="data/BLAT_ECOLX_Jacquier_2013_variants.fasta", ) parser.add_argument( "--output_npy_path", type=str, default="data/BLAT_ECOLX_Jacquier_2013_variants.npy", ) parser.add_argument("--batch_size", type=int, default=8) parser.add_argument("--seed", type=int, default=188257) parser.add_argument( "--debug", action="store_true", help="run only 1/15 params from the msa sampling and filtering ensemble", ) args = parser.parse_args() args.msa_a3m_path = Path(args.msa_a3m_path) args.variants_fasta_path = Path(args.variants_fasta_path) args.output_npy_path = Path(args.output_npy_path) return args @torch.inference_mode() def main(): args = parse_args() # load model ckpt = torch.load(args.ckpt_path) model = PoET(**ckpt["hyper_parameters"]["model_spec"]["init_args"]) model.load_state_dict( {k.split(".", 1)[1]: v for k, v in ckpt["state_dict"].items()} ) del ckpt model = model.cuda().half().eval() alphabet = Uniprot21( include_gap=True, include_startstop=True, distinct_startstop=True ) jit_warmup(model, alphabet) # get variants to score variants = [ append_startstop(alphabet.encode(v), alphabet=alphabet) for v in get_seqs_from_fastalike(args.variants_fasta_path) ] # process msa msa_sequences = get_seqs_from_fastalike(args.msa_a3m_path) msa = get_encoded_msa_from_a3m_seqs(msa_sequences=msa_sequences, alphabet=alphabet) # score the variants logps = [] if not args.debug: params = list( itertools.product( [6144, 12288, 24576], [1.0, 0.95, 0.90, 0.70, 0.50], ) ) else: params = [(12288, 0.95)] for max_tokens, max_similarity in tqdm(params, desc="ensemble"): sampler = MSASampler(
method=NeighborsSampler(
5
2023-10-28 01:30:26+00:00
16k
Transconnectome/SwiFT
interpretation/integrated_gradient.py
[ { "identifier": "SwinTransformer4D", "path": "project/module/models/swin4d_transformer_ver7.py", "snippet": "class SwinTransformer4D(nn.Module):\n \"\"\"\n Swin Transformer based on: \"Liu et al.,\n Swin Transformer: Hierarchical Vision Transformer using Shifted Windows\n <https://arxiv.org/abs/2103.14030>\"\n https://github.com/microsoft/Swin-Transformer\n \"\"\"\n\n def __init__(\n self,\n img_size: Tuple,\n in_chans: int,\n embed_dim: int,\n window_size: Sequence[int],\n first_window_size: Sequence[int],\n patch_size: Sequence[int],\n depths: Sequence[int],\n num_heads: Sequence[int],\n mlp_ratio: float = 4.0,\n qkv_bias: bool = True,\n drop_rate: float = 0.0,\n attn_drop_rate: float = 0.0,\n drop_path_rate: float = 0.0,\n norm_layer: Type[LayerNorm] = nn.LayerNorm,\n patch_norm: bool = False,\n use_checkpoint: bool = False,\n spatial_dims: int = 4,\n c_multiplier: int = 2,\n last_layer_full_MSA: bool = False,\n downsample=\"mergingv2\",\n num_classes=2,\n to_float: bool = False,\n **kwargs,\n ) -> None:\n \"\"\"\n Args:\n in_chans: dimension of input channels.\n embed_dim: number of linear projection output channels.\n window_size: local window size.\n patch_size: patch size.\n depths: number of layers in each stage.\n num_heads: number of attention heads.\n mlp_ratio: ratio of mlp hidden dim to embedding dim.\n qkv_bias: add a learnable bias to query, key, value.\n drop_rate: dropout rate.\n attn_drop_rate: attention dropout rate.\n drop_path_rate: stochastic depth rate.\n norm_layer: normalization layer.\n patch_norm: add normalization after patch embedding.\n use_checkpoint: use gradient checkpointing for reduced memory usage.\n spatial_dims: spatial dimension.\n downsample: module used for downsampling, available options are `\"mergingv2\"`, `\"merging\"` and a\n user-specified `nn.Module` following the API defined in :py:class:`monai.networks.nets.PatchMerging`.\n The default is currently `\"merging\"` (the original version defined in v0.9.0).\n\n\n c_multiplier: multiplier for the feature length after patch merging\n \"\"\"\n\n super().__init__()\n img_size = ensure_tuple_rep(img_size, spatial_dims)\n self.num_layers = len(depths)\n self.embed_dim = embed_dim\n self.patch_norm = patch_norm\n self.window_size = window_size\n self.first_window_size = first_window_size\n self.patch_size = patch_size\n self.to_float = to_float\n self.patch_embed = PatchEmbed(\n img_size=img_size,\n patch_size=self.patch_size,\n in_chans=in_chans,\n embed_dim=embed_dim,\n norm_layer=norm_layer if self.patch_norm else None, # type: ignore\n flatten=False,\n spatial_dims=spatial_dims,\n )\n grid_size = self.patch_embed.grid_size\n self.grid_size = grid_size\n self.pos_drop = nn.Dropout(p=drop_rate)\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]\n\n #patch_num = int((img_size[0]/patch_size[0]) * (img_size[1]/patch_size[1]) * (img_size[2]/patch_size[2]))\n #time_num = int(img_size[3]/patch_size[3])\n patch_dim = ((img_size[0]//patch_size[0]), (img_size[1]//patch_size[1]), (img_size[2]//patch_size[2]), (img_size[3]//patch_size[3]))\n\n #print img, patch size, patch dim\n print(\"img_size: \", img_size)\n print(\"patch_size: \", patch_size)\n print(\"patch_dim: \", patch_dim)\n self.pos_embeds = nn.ModuleList()\n pos_embed_dim = embed_dim\n for i in range(self.num_layers):\n self.pos_embeds.append(PositionalEmbedding(pos_embed_dim, patch_dim))\n pos_embed_dim = pos_embed_dim * c_multiplier\n patch_dim = (patch_dim[0]//2, patch_dim[1]//2, patch_dim[2]//2, patch_dim[3])\n\n # 
build layer\n self.layers = nn.ModuleList()\n down_sample_mod = look_up_option(downsample, MERGING_MODE) if isinstance(downsample, str) else downsample\n \n layer = BasicLayer(\n dim=int(embed_dim),\n depth=depths[0],\n num_heads=num_heads[0],\n window_size=self.first_window_size,\n drop_path=dpr[sum(depths[:0]) : sum(depths[: 0 + 1])],\n mlp_ratio=mlp_ratio,\n qkv_bias=qkv_bias,\n drop=drop_rate,\n attn_drop=attn_drop_rate,\n norm_layer=norm_layer,\n c_multiplier=c_multiplier,\n downsample=down_sample_mod if 0 < self.num_layers - 1 else None,\n use_checkpoint=use_checkpoint,\n )\n self.layers.append(layer)\n\n # exclude last layer\n for i_layer in range(1, self.num_layers - 1):\n layer = BasicLayer(\n dim=int(embed_dim * (c_multiplier**i_layer)),\n depth=depths[i_layer],\n num_heads=num_heads[i_layer],\n window_size=self.window_size,\n drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])],\n mlp_ratio=mlp_ratio,\n qkv_bias=qkv_bias,\n drop=drop_rate,\n attn_drop=attn_drop_rate,\n norm_layer=norm_layer,\n c_multiplier=c_multiplier,\n downsample=down_sample_mod if i_layer < self.num_layers - 1 else None,\n use_checkpoint=use_checkpoint,\n )\n self.layers.append(layer)\n\n if not last_layer_full_MSA:\n layer = BasicLayer(\n dim=int(embed_dim * c_multiplier ** (self.num_layers - 1)),\n depth=depths[(self.num_layers - 1)],\n num_heads=num_heads[(self.num_layers - 1)],\n window_size=self.window_size,\n drop_path=dpr[sum(depths[: (self.num_layers - 1)]) : sum(depths[: (self.num_layers - 1) + 1])],\n mlp_ratio=mlp_ratio,\n qkv_bias=qkv_bias,\n drop=drop_rate,\n attn_drop=attn_drop_rate,\n norm_layer=norm_layer,\n c_multiplier=c_multiplier,\n downsample=None,\n use_checkpoint=use_checkpoint,\n )\n self.layers.append(layer)\n\n else:\n #################Full MSA for last layer#####################\n\n self.last_window_size = (\n self.grid_size[0] // int(2 ** (self.num_layers - 1)),\n self.grid_size[1] // int(2 ** (self.num_layers - 1)),\n self.grid_size[2] // int(2 ** (self.num_layers - 1)),\n self.window_size[3],\n )\n\n layer = BasicLayer_FullAttention(\n dim=int(embed_dim * c_multiplier ** (self.num_layers - 1)),\n depth=depths[(self.num_layers - 1)],\n num_heads=num_heads[(self.num_layers - 1)],\n # change the window size to the entire grid size\n window_size=self.last_window_size,\n drop_path=dpr[sum(depths[: (self.num_layers - 1)]) : sum(depths[: (self.num_layers - 1) + 1])],\n mlp_ratio=mlp_ratio,\n qkv_bias=qkv_bias,\n drop=drop_rate,\n attn_drop=attn_drop_rate,\n norm_layer=norm_layer,\n c_multiplier=c_multiplier,\n downsample=None,\n use_checkpoint=use_checkpoint,\n )\n self.layers.append(layer)\n\n #############################################################\n\n self.num_features = int(embed_dim * c_multiplier ** (self.num_layers - 1))\n\n self.norm = norm_layer(self.num_features)\n self.avgpool = nn.AdaptiveAvgPool1d(1) #\n self.head = nn.Linear(self.num_features, 1) if num_classes == 2 else num_classes\n\n\n def forward(self, x):\n\n #print model parameters\n # for name, param in self.named_parameters():\n # if param.requires_grad:\n # print(name, param.data.shape)\n\n if self.to_float:\n # converting tensor to float\n x = x.float()\n x = self.patch_embed(x)\n x = self.pos_drop(x) # (b, c, h, w, d, t)\n\n for i in range(self.num_layers):\n x = self.pos_embeds[i](x)\n x = self.layers[i](x.contiguous())\n\n # moved this part to clf_mlp or reg_mlp\n\n # x = x.flatten(start_dim=2).transpose(1, 2) # B L C\n # x = self.norm(x) # B L C\n # x = self.avgpool(x.transpose(1, 2)) # B C 
1\n # x = torch.flatten(x, 1)\n # x = self.head(x)\n\n return x" }, { "identifier": "LitClassifier", "path": "project/module/pl_classifier.py", "snippet": "class LitClassifier(pl.LightningModule):\n def __init__(self,data_module, **kwargs):\n super().__init__()\n self.save_hyperparameters(kwargs) # save hyperparameters except data_module (data_module cannot be pickled as a checkpoint)\n \n # you should define target_values at the Dataset classes\n target_values = data_module.train_dataset.target_values\n if self.hparams.label_scaling_method == 'standardization':\n scaler = StandardScaler()\n normalized_target_values = scaler.fit_transform(target_values)\n print(f'target_mean:{scaler.mean_[0]}, target_std:{scaler.scale_[0]}')\n elif self.hparams.label_scaling_method == 'minmax': \n scaler = MinMaxScaler()\n normalized_target_values = scaler.fit_transform(target_values)\n print(f'target_max:{scaler.data_max_[0]},target_min:{scaler.data_min_[0]}')\n self.scaler = scaler\n print(self.hparams.model)\n self.model = load_model(self.hparams.model, self.hparams)\n\n # Heads\n if not self.hparams.pretraining:\n if self.hparams.downstream_task == 'sex' or self.hparams.downstream_task_type == 'classification' or self.hparams.scalability_check:\n self.output_head = load_model(\"clf_mlp\", self.hparams)\n elif self.hparams.downstream_task == 'age' or self.hparams.downstream_task == 'int_total' or self.hparams.downstream_task == 'int_fluid' or self.hparams.downstream_task_type == 'regression':\n self.output_head = load_model(\"reg_mlp\", self.hparams)\n elif self.hparams.use_contrastive:\n self.output_head = load_model(\"emb_mlp\", self.hparams)\n else:\n raise NotImplementedError(\"output head should be defined\")\n\n self.metric = Metrics()\n\n if self.hparams.adjust_thresh:\n self.threshold = 0\n\n def forward(self, x):\n return self.output_head(self.model(x))\n \n def augment(self, img):\n\n B, C, H, W, D, T = img.shape\n\n device = img.device\n img = rearrange(img, 'b c h w d t -> b t c h w d')\n\n rand_affine = monai_t.RandAffine(\n prob=1.0,\n # 0.175 rad = 10 degrees\n rotate_range=(0.175, 0.175, 0.175),\n scale_range = (0.1, 0.1, 0.1),\n mode = \"bilinear\",\n padding_mode = \"border\",\n device = device\n )\n rand_noise = monai_t.RandGaussianNoise(prob=0.3, std=0.1)\n rand_smooth = monai_t.RandGaussianSmooth(sigma_x=(0.0, 0.5), sigma_y=(0.0, 0.5), sigma_z=(0.0, 0.5), prob=0.1)\n if self.hparams.augment_only_intensity:\n comp = monai_t.Compose([rand_noise, rand_smooth])\n else:\n comp = monai_t.Compose([rand_affine, rand_noise, rand_smooth]) \n\n for b in range(B):\n aug_seed = torch.randint(0, 10000000, (1,)).item()\n # set augmentation seed to be the same for all time steps\n for t in range(T):\n if self.hparams.augment_only_affine:\n rand_affine.set_random_state(seed=aug_seed)\n img[b, t, :, :, :, :] = rand_affine(img[b, t, :, :, :, :])\n else:\n comp.set_random_state(seed=aug_seed)\n img[b, t, :, :, :, :] = comp(img[b, t, :, :, :, :])\n\n img = rearrange(img, 'b t c h w d -> b c h w d t')\n\n return img\n \n def _compute_logits(self, batch, augment_during_training=None):\n fmri, subj, target_value, tr, sex = batch.values()\n \n if augment_during_training:\n fmri = self.augment(fmri)\n\n feature = self.model(fmri)\n\n # Classification task\n if self.hparams.downstream_task == 'sex' or self.hparams.downstream_task_type == 'classification' or self.hparams.scalability_check:\n logits = self.output_head(feature).squeeze() #self.clf(feature).squeeze()\n target = target_value.float().squeeze()\n # 
Regression task\n elif self.hparams.downstream_task == 'age' or self.hparams.downstream_task == 'int_total' or self.hparams.downstream_task == 'int_fluid' or self.hparams.downstream_task_type == 'regression':\n # target_mean, target_std = self.determine_target_mean_std()\n logits = self.output_head(feature) # (batch,1) or # tuple((batch,1), (batch,1))\n unnormalized_target = target_value.float() # (batch,1)\n if self.hparams.label_scaling_method == 'standardization': # default\n target = (unnormalized_target - self.scaler.mean_[0]) / (self.scaler.scale_[0])\n elif self.hparams.label_scaling_method == 'minmax':\n target = (unnormalized_target - self.scaler.data_min_[0]) / (self.scaler.data_max_[0] - self.scaler.data_min_[0])\n \n return subj, logits, target\n \n def _calculate_loss(self, batch, mode):\n if self.hparams.pretraining:\n fmri, subj, target_value, tr, sex = batch.values()\n \n cond1 = (self.hparams.in_chans == 1 and not self.hparams.with_voxel_norm)\n assert cond1, \"Wrong combination of options\"\n loss = 0\n\n if self.hparams.use_contrastive:\n assert self.hparams.contrastive_type != \"none\", \"Contrastive type not specified\"\n\n # B, C, H, W, D, T = image shape\n y, diff_y = fmri\n\n batch_size = y.shape[0]\n if (len(subj) != len(tuple(subj))) and mode == 'train':\n print('Some sub-sequences in a batch came from the same subject!')\n criterion = NTXentLoss(device='cuda', batch_size=batch_size,\n temperature=self.hparams.temperature,\n use_cosine_similarity=True).cuda()\n criterion_ll = NTXentLoss(device='cuda', batch_size=2,\n temperature=self.hparams.temperature,\n use_cosine_similarity=True).cuda()\n \n # type 1: IC\n # type 2: LL\n # type 3: IC + LL\n if self.hparams.contrastive_type in [1, 3]:\n out_global_1 = self.output_head(self.model(self.augment(y)),\"g\")\n out_global_2 = self.output_head(self.model(self.augment(diff_y)),\"g\")\n ic_loss = criterion(out_global_1, out_global_2)\n loss += ic_loss\n\n if self.hparams.contrastive_type in [2, 3]:\n out_local_1 = []\n out_local_2 = []\n out_local_swin1 = self.model(self.augment(y))\n out_local_swin2 = self.model(self.augment(y))\n out_local_1.append(self.output_head(out_local_swin1, \"l\"))\n out_local_2.append(self.output_head(out_local_swin2, \"l\"))\n\n out_local_swin1 = self.model(self.augment(diff_y))\n out_local_swin2 = self.model(self.augment(diff_y))\n out_local_1.append(self.output_head(out_local_swin1, \"l\"))\n out_local_2.append(self.output_head(out_local_swin2, \"l\"))\n\n ll_loss = 0\n # loop over batch size\n for i in range(out_local_1[0].shape[0]):\n # out_local shape should be: BS, n_local_clips, D\n ll_loss += criterion_ll(torch.stack(out_local_1, dim=1)[i],\n torch.stack(out_local_2, dim=1)[i])\n loss += ll_loss\n\n result_dict = {\n f\"{mode}_loss\": loss,\n } \n else:\n subj, logits, target = self._compute_logits(batch, augment_during_training = self.hparams.augment_during_training)\n\n if self.hparams.downstream_task == 'sex' or self.hparams.downstream_task_type == 'classification' or self.hparams.scalability_check:\n loss = F.binary_cross_entropy_with_logits(logits, target) # target is float\n acc = self.metric.get_accuracy_binary(logits, target.float().squeeze())\n result_dict = {\n f\"{mode}_loss\": loss,\n f\"{mode}_acc\": acc,\n }\n\n elif self.hparams.downstream_task == 'age' or self.hparams.downstream_task == 'int_total' or self.hparams.downstream_task == 'int_fluid' or self.hparams.downstream_task_type == 'regression':\n loss = F.mse_loss(logits.squeeze(), target.squeeze())\n l1 = 
F.l1_loss(logits.squeeze(), target.squeeze())\n result_dict = {\n f\"{mode}_loss\": loss,\n f\"{mode}_mse\": loss,\n f\"{mode}_l1_loss\": l1\n }\n self.log_dict(result_dict, prog_bar=True, sync_dist=False, add_dataloader_idx=False, on_step=True, on_epoch=True, batch_size=self.hparams.batch_size) # batch_size = batch_size\n return loss\n\n def _evaluate_metrics(self, subj_array, total_out, mode):\n # print('total_out.device',total_out.device)\n # (total iteration/world_size) numbers of samples are passed into _evaluate_metrics.\n subjects = np.unique(subj_array)\n \n subj_avg_logits = []\n subj_targets = []\n for subj in subjects:\n #print('total_out.shape:',total_out.shape) # total_out.shape: torch.Size([16, 2])\n subj_logits = total_out[subj_array == subj,0] \n subj_avg_logits.append(torch.mean(subj_logits).item())\n subj_targets.append(total_out[subj_array == subj,1][0].item())\n subj_avg_logits = torch.tensor(subj_avg_logits, device = total_out.device) \n subj_targets = torch.tensor(subj_targets, device = total_out.device) \n \n \n if self.hparams.downstream_task == 'sex' or self.hparams.downstream_task_type == 'classification' or self.hparams.scalability_check:\n if self.hparams.adjust_thresh:\n # move threshold to maximize balanced accuracy\n best_bal_acc = 0\n best_thresh = 0\n for thresh in np.arange(-5, 5, 0.01):\n bal_acc = balanced_accuracy_score(subj_targets.cpu(), (subj_avg_logits>=thresh).int().cpu())\n if bal_acc > best_bal_acc:\n best_bal_acc = bal_acc\n best_thresh = thresh\n self.log(f\"{mode}_best_thresh\", best_thresh, sync_dist=True)\n self.log(f\"{mode}_best_balacc\", best_bal_acc, sync_dist=True)\n fpr, tpr, thresholds = roc_curve(subj_targets.cpu(), subj_avg_logits.cpu())\n idx = np.argmax(tpr - fpr)\n youden_thresh = thresholds[idx]\n acc_func = BinaryAccuracy().to(total_out.device)\n self.log(f\"{mode}_youden_thresh\", youden_thresh, sync_dist=True)\n self.log(f\"{mode}_youden_balacc\", balanced_accuracy_score(subj_targets.cpu(), (subj_avg_logits>=youden_thresh).int().cpu()), sync_dist=True)\n\n if mode == 'valid':\n self.threshold = youden_thresh\n elif mode == 'test':\n bal_acc = balanced_accuracy_score(subj_targets.cpu(), (subj_avg_logits>=self.threshold).int().cpu())\n self.log(f\"{mode}_balacc_from_valid_thresh\", bal_acc, sync_dist=True)\n else:\n acc_func = BinaryAccuracy().to(total_out.device)\n \n auroc_func = BinaryAUROC().to(total_out.device)\n acc = acc_func((subj_avg_logits >= 0).int(), subj_targets)\n #print((subj_avg_logits>=0).int().cpu())\n #print(subj_targets.cpu())\n bal_acc_sk = balanced_accuracy_score(subj_targets.cpu(), (subj_avg_logits>=0).int().cpu())\n auroc = auroc_func(torch.sigmoid(subj_avg_logits), subj_targets)\n\n self.log(f\"{mode}_acc\", acc, sync_dist=True)\n self.log(f\"{mode}_balacc\", bal_acc_sk, sync_dist=True)\n self.log(f\"{mode}_AUROC\", auroc, sync_dist=True)\n\n # regression target is normalized\n elif self.hparams.downstream_task == 'age' or self.hparams.downstream_task == 'int_total' or self.hparams.downstream_task == 'int_fluid' or self.hparams.downstream_task_type == 'regression': \n mse = F.mse_loss(subj_avg_logits, subj_targets)\n mae = F.l1_loss(subj_avg_logits, subj_targets)\n \n # reconstruct to original scale\n if self.hparams.label_scaling_method == 'standardization': # default\n adjusted_mse = F.mse_loss(subj_avg_logits * self.scaler.scale_[0] + self.scaler.mean_[0], subj_targets * self.scaler.scale_[0] + self.scaler.mean_[0])\n adjusted_mae = F.l1_loss(subj_avg_logits * self.scaler.scale_[0] + 
self.scaler.mean_[0], subj_targets * self.scaler.scale_[0] + self.scaler.mean_[0])\n elif self.hparams.label_scaling_method == 'minmax':\n adjusted_mse = F.mse_loss(subj_avg_logits * (self.scaler.data_max_[0] - self.scaler.data_min_[0]) + self.scaler.data_min_[0], subj_targets * (self.scaler.data_max_[0] - self.scaler.data_min_[0]) + self.scaler.data_min_[0])\n adjusted_mae = F.l1_loss(subj_avg_logits * (self.scaler.data_max_[0] - self.scaler.data_min_[0]) + self.scaler.data_min_[0], subj_targets * (self.scaler.data_max_[0] - self.scaler.data_min_[0]) + self.scaler.data_min_[0])\n pearson = PearsonCorrCoef().to(total_out.device)\n prearson_coef = pearson(subj_avg_logits, subj_targets)\n \n self.log(f\"{mode}_corrcoef\", prearson_coef, sync_dist=True)\n self.log(f\"{mode}_mse\", mse, sync_dist=True)\n self.log(f\"{mode}_mae\", mae, sync_dist=True)\n self.log(f\"{mode}_adjusted_mse\", adjusted_mse, sync_dist=True) \n self.log(f\"{mode}_adjusted_mae\", adjusted_mae, sync_dist=True) \n\n def training_step(self, batch, batch_idx):\n loss = self._calculate_loss(batch, mode=\"train\")\n return loss\n\n def validation_step(self, batch, batch_idx, dataloader_idx):\n if self.hparams.pretraining:\n if dataloader_idx == 0:\n self._calculate_loss(batch, mode=\"valid\")\n else:\n self._calculate_loss(batch, mode=\"test\")\n else:\n subj, logits, target = self._compute_logits(batch)\n if self.hparams.downstream_task_type == 'multi_task':\n output = torch.stack([logits[1].squeeze(), target], dim=1) # logits[1] : regression head\n else:\n output = torch.stack([logits.squeeze(), target.squeeze()], dim=1)\n return (subj, output)\n\n def validation_epoch_end(self, outputs):\n # called at the end of the validation epoch\n # outputs is an array with what you returned in validation_step for each batch\n # outputs = [{'loss': batch_0_loss}, {'loss': batch_1_loss}, ..., {'loss': batch_n_loss}] \n if not self.hparams.pretraining:\n outputs_valid = outputs[0]\n outputs_test = outputs[1]\n subj_valid = []\n subj_test = []\n out_valid_list = []\n out_test_list = []\n for subj, out in outputs_valid:\n subj_valid += subj\n out_valid_list.append(out.detach())\n for subj, out in outputs_test:\n subj_test += subj\n out_test_list.append(out.detach())\n subj_valid = np.array(subj_valid)\n subj_test = np.array(subj_test)\n total_out_valid = torch.cat(out_valid_list, dim=0)\n total_out_test = torch.cat(out_test_list, dim=0)\n\n # save model predictions if it is needed for future analysis\n # self._save_predictions(subj_valid,total_out_valid,mode=\"valid\")\n # self._save_predictions(subj_test,total_out_test, mode=\"test\") \n \n # evaluate \n self._evaluate_metrics(subj_valid, total_out_valid, mode=\"valid\")\n self._evaluate_metrics(subj_test, total_out_test, mode=\"test\")\n \n # If you use loggers other than Neptune you may need to modify this\n def _save_predictions(self,total_subjs,total_out, mode):\n self.subject_accuracy = {}\n for subj, output in zip(total_subjs,total_out):\n if self.hparams.downstream_task == 'sex':\n score = torch.sigmoid(output[0]).item()\n else:\n score = output[0].item()\n\n if subj not in self.subject_accuracy:\n self.subject_accuracy[subj] = {'score': [score], 'mode':mode, 'truth':output[1], 'count':1}\n else:\n self.subject_accuracy[subj]['score'].append(score)\n self.subject_accuracy[subj]['count']+=1\n \n if self.hparams.strategy == None : \n pass\n elif 'ddp' in self.hparams.strategy and len(self.subject_accuracy) > 0:\n world_size = torch.distributed.get_world_size()\n total_subj_accuracy = 
[None for _ in range(world_size)]\n torch.distributed.all_gather_object(total_subj_accuracy,self.subject_accuracy) # gather and broadcast to whole ranks \n accuracy_dict = {}\n for dct in total_subj_accuracy:\n for subj, metric_dict in dct.items():\n if subj not in accuracy_dict:\n accuracy_dict[subj] = metric_dict\n else:\n accuracy_dict[subj]['score']+=metric_dict['score']\n accuracy_dict[subj]['count']+=metric_dict['count']\n self.subject_accuracy = accuracy_dict\n if self.trainer.is_global_zero:\n for subj_name,subj_dict in self.subject_accuracy.items():\n subj_pred = np.mean(subj_dict['score'])\n subj_error = np.std(subj_dict['score'])\n subj_truth = subj_dict['truth'].item()\n subj_count = subj_dict['count']\n subj_mode = subj_dict['mode'] # train, val, test\n\n # only save samples at rank 0 (total iterations/world_size numbers are saved) \n os.makedirs(os.path.join('predictions',self.hparams.id), exist_ok=True)\n with open(os.path.join('predictions',self.hparams.id,'iter_{}.txt'.format(self.current_epoch)),'a+') as f:\n f.write('subject:{} ({})\\ncount: {} outputs: {:.4f}\\u00B1{:.4f} - truth: {}\\n'.format(subj_name,subj_mode,subj_count,subj_pred,subj_error,subj_truth))\n\n with open(os.path.join('predictions',self.hparams.id,'iter_{}.pkl'.format(self.current_epoch)),'wb') as fw:\n pickle.dump(self.subject_accuracy, fw)\n\n def test_step(self, batch, batch_idx):\n subj, logits, target = self._compute_logits(batch)\n output = torch.stack([logits.squeeze(), target.squeeze()], dim=1)\n return (subj, output)\n\n def test_epoch_end(self, outputs):\n if not self.hparams.pretraining:\n subj_test = [] \n out_test_list = []\n for subj, out in outputs:\n subj_test += subj\n out_test_list.append(out.detach())\n subj_test = np.array(subj_test)\n total_out_test = torch.cat(out_test_list, dim=0)\n # self._save_predictions(subj_test, total_out_test, mode=\"test\") \n self._evaluate_metrics(subj_test, total_out_test, mode=\"test\")\n \n def on_train_epoch_start(self) -> None:\n self.starter, self.ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)\n self.total_time = 0\n self.repetitions = 200\n self.gpu_warmup = 50\n self.timings=np.zeros((self.repetitions,1))\n return super().on_train_epoch_start()\n \n def on_train_batch_start(self, batch, batch_idx):\n if self.hparams.scalability_check:\n if batch_idx < self.gpu_warmup:\n pass\n elif (batch_idx-self.gpu_warmup) < self.repetitions:\n self.starter.record()\n return super().on_train_batch_start(batch, batch_idx)\n \n def on_train_batch_end(self, out, batch, batch_idx):\n if self.hparams.scalability_check:\n if batch_idx < self.gpu_warmup:\n pass\n elif (batch_idx-self.gpu_warmup) < self.repetitions:\n self.ender.record()\n torch.cuda.synchronize()\n curr_time = self.starter.elapsed_time(self.ender) / 1000\n self.total_time += curr_time\n self.timings[batch_idx-self.gpu_warmup] = curr_time\n elif (batch_idx-self.gpu_warmup) == self.repetitions:\n mean_syn = np.mean(self.timings)\n std_syn = np.std(self.timings)\n \n Throughput = (self.repetitions*self.hparams.batch_size*int(self.hparams.num_nodes) * int(self.hparams.devices))/self.total_time\n \n self.log(f\"Throughput\", Throughput, sync_dist=False)\n self.log(f\"mean_time\", mean_syn, sync_dist=False)\n self.log(f\"std_time\", std_syn, sync_dist=False)\n print('mean_syn:',mean_syn)\n print('std_syn:',std_syn)\n \n return super().on_train_batch_end(out, batch, batch_idx)\n\n\n # def on_before_optimizer_step(self, optimizer, optimizer_idx: int) -> None:\n\n def 
configure_optimizers(self):\n if self.hparams.optimizer == \"AdamW\":\n optim = torch.optim.AdamW(\n self.parameters(), lr=self.hparams.learning_rate, weight_decay=self.hparams.weight_decay\n )\n elif self.hparams.optimizer == \"SGD\":\n optim = torch.optim.SGD(\n self.parameters(), lr=self.hparams.learning_rate, weight_decay=self.hparams.weight_decay, momentum=self.hparams.momentum\n )\n else:\n print(\"Error: Input a correct optimizer name (default: AdamW)\")\n \n if self.hparams.use_scheduler:\n print()\n print(\"training steps: \" + str(self.trainer.estimated_stepping_batches))\n print(\"using scheduler\")\n print()\n total_iterations = self.trainer.estimated_stepping_batches # ((number of samples/batch size)/number of gpus) * num_epochs\n gamma = self.hparams.gamma\n base_lr = self.hparams.learning_rate\n warmup = int(total_iterations * 0.05) # adjust the length of warmup here.\n T_0 = int(self.hparams.cycle * total_iterations)\n T_mult = 1\n \n sche = CosineAnnealingWarmUpRestarts(optim, first_cycle_steps=T_0, cycle_mult=T_mult, max_lr=base_lr,min_lr=1e-9, warmup_steps=warmup, gamma=gamma)\n print('total iterations:',self.trainer.estimated_stepping_batches * self.hparams.max_epochs)\n\n scheduler = {\n \"scheduler\": sche,\n \"name\": \"lr_history\",\n \"interval\": \"step\",\n }\n\n return [optim], [scheduler]\n else:\n return optim\n\n @staticmethod\n def add_model_specific_args(parent_parser):\n parser = ArgumentParser(parents=[parent_parser], add_help=False, formatter_class=ArgumentDefaultsHelpFormatter)\n group = parser.add_argument_group(\"Default classifier\")\n # training related\n group.add_argument(\"--grad_clip\", action='store_true', help=\"whether to use gradient clipping\")\n group.add_argument(\"--optimizer\", type=str, default=\"AdamW\", help=\"which optimizer to use [AdamW, SGD]\")\n group.add_argument(\"--use_scheduler\", action='store_true', help=\"whether to use scheduler\")\n group.add_argument(\"--weight_decay\", type=float, default=0.01, help=\"weight decay for optimizer\")\n group.add_argument(\"--learning_rate\", type=float, default=1e-3, help=\"learning rate for optimizer\")\n group.add_argument(\"--momentum\", type=float, default=0, help=\"momentum for SGD\")\n group.add_argument(\"--gamma\", type=float, default=1.0, help=\"decay for exponential LR scheduler\")\n group.add_argument(\"--cycle\", type=float, default=0.3, help=\"cycle size for CosineAnnealingWarmUpRestarts\")\n group.add_argument(\"--milestones\", nargs=\"+\", default=[100, 150], type=int, help=\"lr scheduler\")\n group.add_argument(\"--adjust_thresh\", action='store_true', help=\"whether to adjust threshold for valid/test\")\n \n # pretraining-related\n group.add_argument(\"--use_contrastive\", action='store_true', help=\"whether to use contrastive learning (specify --contrastive_type argument as well)\")\n group.add_argument(\"--contrastive_type\", default=0, type=int, help=\"combination of contrastive losses to use [1: Use the Instance contrastive loss function, 2: Use the local-local temporal contrastive loss function, 3: Use the sum of both loss functions]\")\n group.add_argument(\"--pretraining\", action='store_true', help=\"whether to use pretraining\")\n group.add_argument(\"--augment_during_training\", action='store_true', help=\"whether to augment input images during training\")\n group.add_argument(\"--augment_only_affine\", action='store_true', help=\"whether to only apply affine augmentation\")\n group.add_argument(\"--augment_only_intensity\", action='store_true', help=\"whether 
to only apply intensity augmentation\")\n group.add_argument(\"--temperature\", default=0.1, type=float, help=\"temperature for NTXentLoss\")\n \n # model related\n group.add_argument(\"--model\", type=str, default=\"none\", help=\"which model to be used\")\n group.add_argument(\"--in_chans\", type=int, default=1, help=\"Channel size of input image\")\n group.add_argument(\"--embed_dim\", type=int, default=24, help=\"embedding size (recommend to use 24, 36, 48)\")\n group.add_argument(\"--window_size\", nargs=\"+\", default=[4, 4, 4, 4], type=int, help=\"window size from the second layers\")\n group.add_argument(\"--first_window_size\", nargs=\"+\", default=[2, 2, 2, 2], type=int, help=\"first window size\")\n group.add_argument(\"--patch_size\", nargs=\"+\", default=[6, 6, 6, 1], type=int, help=\"patch size\")\n group.add_argument(\"--depths\", nargs=\"+\", default=[2, 2, 6, 2], type=int, help=\"depth of layers in each stage\")\n group.add_argument(\"--num_heads\", nargs=\"+\", default=[3, 6, 12, 24], type=int, help=\"The number of heads for each attention layer\")\n group.add_argument(\"--c_multiplier\", type=int, default=2, help=\"channel multiplier for Swin Transformer architecture\")\n group.add_argument(\"--last_layer_full_MSA\", type=str2bool, default=False, help=\"whether to use full-scale multi-head self-attention at the last layers\")\n group.add_argument(\"--clf_head_version\", type=str, default=\"v1\", help=\"clf head version, v2 has a hidden layer\")\n group.add_argument(\"--attn_drop_rate\", type=float, default=0, help=\"dropout rate of attention layers\")\n\n # others\n group.add_argument(\"--scalability_check\", action='store_true', help=\"whether to check scalability\")\n group.add_argument(\"--process_code\", default=None, help=\"Slurm code/PBS code. 
Use this argument if you want to save process codes to your log\")\n \n return parser" }, { "identifier": "fMRIDataModule", "path": "project/module/utils/data_module.py", "snippet": "class fMRIDataModule(pl.LightningDataModule):\n def __init__(self, **kwargs):\n super().__init__()\n self.save_hyperparameters()\n\n # generate splits folder\n if self.hparams.pretraining:\n split_dir_path = f'./data/splits/{self.hparams.dataset_name}/pretraining'\n else:\n split_dir_path = f'./data/splits/{self.hparams.dataset_name}'\n os.makedirs(split_dir_path, exist_ok=True)\n self.split_file_path = os.path.join(split_dir_path, f\"split_fixed_{self.hparams.dataset_split_num}.txt\")\n \n self.setup()\n\n #pl.seed_everything(seed=self.hparams.data_seed)\n\n def get_dataset(self):\n if self.hparams.dataset_name == \"Dummy\":\n return Dummy\n elif self.hparams.dataset_name == \"S1200\":\n return S1200\n elif self.hparams.dataset_name == \"ABCD\":\n return ABCD\n elif self.hparams.dataset_name == 'UKB':\n return UKB\n else:\n raise NotImplementedError\n\n def convert_subject_list_to_idx_list(self, train_names, val_names, test_names, subj_list):\n #subj_idx = np.array([str(x[0]) for x in subj_list])\n subj_idx = np.array([str(x[1]) for x in subj_list])\n S = np.unique([x[1] for x in subj_list])\n # print(S)\n print('unique subjects:',len(S)) \n train_idx = np.where(np.in1d(subj_idx, train_names))[0].tolist()\n val_idx = np.where(np.in1d(subj_idx, val_names))[0].tolist()\n test_idx = np.where(np.in1d(subj_idx, test_names))[0].tolist()\n return train_idx, val_idx, test_idx\n \n def save_split(self, sets_dict):\n with open(self.split_file_path, \"w+\") as f:\n for name, subj_list in sets_dict.items():\n f.write(name + \"\\n\")\n for subj_name in subj_list:\n f.write(str(subj_name) + \"\\n\")\n \n def determine_split_randomly(self, S):\n S = list(S.keys())\n S_train = int(len(S) * self.hparams.train_split)\n S_val = int(len(S) * self.hparams.val_split)\n S_train = np.random.choice(S, S_train, replace=False)\n remaining = np.setdiff1d(S, S_train) # np.setdiff1d(np.arange(S), S_train)\n S_val = np.random.choice(remaining, S_val, replace=False)\n S_test = np.setdiff1d(S, np.concatenate([S_train, S_val])) # np.setdiff1d(np.arange(S), np.concatenate([S_train, S_val]))\n # train_idx, val_idx, test_idx = self.convert_subject_list_to_idx_list(S_train, S_val, S_test, self.subject_list)\n self.save_split({\"train_subjects\": S_train, \"val_subjects\": S_val, \"test_subjects\": S_test})\n return S_train, S_val, S_test\n \n def load_split(self):\n subject_order = open(self.split_file_path, \"r\").readlines()\n subject_order = [x[:-1] for x in subject_order]\n train_index = np.argmax([\"train\" in line for line in subject_order])\n val_index = np.argmax([\"val\" in line for line in subject_order])\n test_index = np.argmax([\"test\" in line for line in subject_order])\n train_names = subject_order[train_index + 1 : val_index]\n val_names = subject_order[val_index + 1 : test_index]\n test_names = subject_order[test_index + 1 :]\n return train_names, val_names, test_names\n\n def prepare_data(self):\n # This function is only called at global rank==0\n return\n \n # filter subjects with metadata and pair subject names with their target values (+ sex)\n def make_subject_dict(self):\n # output: {'subj1':[target1,target2],'subj2':[target1,target2]...}\n img_root = os.path.join(self.hparams.image_path, 'img')\n final_dict = dict()\n if self.hparams.dataset_name == \"S1200\":\n subject_list = os.listdir(img_root)\n meta_data = 
pd.read_csv(os.path.join(self.hparams.image_path, \"metadata\", \"HCP_1200_gender.csv\"))\n meta_data_residual = pd.read_csv(os.path.join(self.hparams.image_path, \"metadata\", \"HCP_1200_precise_age.csv\"))\n meta_data_all = pd.read_csv(os.path.join(self.hparams.image_path, \"metadata\", \"HCP_1200_all.csv\"))\n if self.hparams.downstream_task == 'sex': task_name = 'Gender'\n elif self.hparams.downstream_task == 'age': task_name = 'age'\n elif self.hparams.downstream_task == 'int_total': task_name = 'CogTotalComp_AgeAdj'\n else: raise NotImplementedError()\n\n if self.hparams.downstream_task == 'sex':\n meta_task = meta_data[['Subject',task_name]].dropna()\n elif self.hparams.downstream_task == 'age':\n meta_task = meta_data_residual[['subject',task_name,'sex']].dropna()\n #rename column subject to Subject\n meta_task = meta_task.rename(columns={'subject': 'Subject'})\n elif self.hparams.downstream_task == 'int_total':\n meta_task = meta_data[['Subject',task_name,'Gender']].dropna() \n \n for subject in subject_list:\n if int(subject) in meta_task['Subject'].values:\n if self.hparams.downstream_task == 'sex':\n target = meta_task[meta_task[\"Subject\"]==int(subject)][task_name].values[0]\n target = 1 if target == \"M\" else 0\n sex = target\n elif self.hparams.downstream_task == 'age':\n target = meta_task[meta_task[\"Subject\"]==int(subject)][task_name].values[0]\n sex = meta_task[meta_task[\"Subject\"]==int(subject)][\"sex\"].values[0]\n sex = 1 if sex == \"M\" else 0\n elif self.hparams.downstream_task == 'int_total':\n target = meta_task[meta_task[\"Subject\"]==int(subject)][task_name].values[0]\n sex = meta_task[meta_task[\"Subject\"]==int(subject)][\"Gender\"].values[0]\n sex = 1 if sex == \"M\" else 0\n final_dict[subject]=[sex,target]\n \n elif self.hparams.dataset_name == \"ABCD\":\n subject_list = [subj[4:] for subj in os.listdir(img_root)]\n \n meta_data = pd.read_csv(os.path.join(self.hparams.image_path, \"metadata\", \"ABCD_phenotype_total.csv\"))\n if self.hparams.downstream_task == 'sex': task_name = 'sex'\n elif self.hparams.downstream_task == 'age': task_name = 'age'\n elif self.hparams.downstream_task == 'int_total': task_name = 'nihtbx_totalcomp_uncorrected'\n else: raise ValueError('downstream task not supported')\n \n if self.hparams.downstream_task == 'sex':\n meta_task = meta_data[['subjectkey',task_name]].dropna()\n else:\n meta_task = meta_data[['subjectkey',task_name,'sex']].dropna()\n \n for subject in subject_list:\n if subject in meta_task['subjectkey'].values:\n target = meta_task[meta_task[\"subjectkey\"]==subject][task_name].values[0]\n sex = meta_task[meta_task[\"subjectkey\"]==subject][\"sex\"].values[0]\n final_dict[subject]=[sex,target]\n \n elif self.hparams.dataset_name == \"UKB\":\n if self.hparams.downstream_task == 'sex': task_name = 'sex'\n elif self.hparams.downstream_task == 'age': task_name = 'age'\n elif self.hparams.downstream_task == 'int_fluid' : task_name = 'fluid'\n else: raise ValueError('downstream task not supported')\n \n meta_data = pd.read_csv(os.path.join(self.hparams.image_path, \"metadata\", \"UKB_phenotype_gps_fluidint.csv\"))\n if task_name == 'sex':\n meta_task = meta_data[['eid',task_name]].dropna()\n else:\n meta_task = meta_data[['eid',task_name,'sex']].dropna()\n\n for subject in os.listdir(img_root):\n if subject.endswith('20227_2_0') and (int(subject[:7]) in meta_task['eid'].values):\n target = meta_task[meta_task[\"eid\"]==int(subject[:7])][task_name].values[0]\n sex = 
meta_task[meta_task[\"eid\"]==int(subject[:7])].values[0]\n final_dict[str(subject[:7])] = [sex,target]\n else:\n continue \n \n return final_dict\n\n def setup(self, stage=None):\n # this function will be called at each devices\n Dataset = self.get_dataset()\n params = {\n \"root\": self.hparams.image_path,\n \"sequence_length\": self.hparams.sequence_length,\n \"contrastive\":self.hparams.use_contrastive,\n \"contrastive_type\":self.hparams.contrastive_type,\n \"stride_between_seq\": self.hparams.stride_between_seq,\n \"stride_within_seq\": self.hparams.stride_within_seq,\n \"with_voxel_norm\": self.hparams.with_voxel_norm,\n \"downstream_task\": self.hparams.downstream_task,\n \"shuffle_time_sequence\": self.hparams.shuffle_time_sequence,\n \"input_type\": self.hparams.input_type,\n \"label_scaling_method\" : self.hparams.label_scaling_method,\n \"dtype\":'float16'}\n \n subject_dict = self.make_subject_dict()\n if os.path.exists(self.split_file_path):\n train_names, val_names, test_names = self.load_split()\n else:\n train_names, val_names, test_names = self.determine_split_randomly(subject_dict)\n \n if self.hparams.bad_subj_path:\n bad_subjects = open(self.hparams.bad_subj_path, \"r\").readlines()\n for bad_subj in bad_subjects:\n bad_subj = bad_subj.strip()\n if bad_subj in list(subject_dict.keys()):\n print(f'removing bad subject: {bad_subj}')\n del subject_dict[bad_subj]\n \n if self.hparams.limit_training_samples:\n train_names = np.random.choice(train_names, size=self.hparams.limit_training_samples, replace=False, p=None)\n \n train_dict = {key: subject_dict[key] for key in train_names if key in subject_dict}\n val_dict = {key: subject_dict[key] for key in val_names if key in subject_dict}\n test_dict = {key: subject_dict[key] for key in test_names if key in subject_dict}\n \n self.train_dataset = Dataset(**params,subject_dict=train_dict,use_augmentations=False, train=True)\n # load train mean/std of target labels to val/test dataloader\n self.val_dataset = Dataset(**params,subject_dict=val_dict,use_augmentations=False,train=False) \n self.test_dataset = Dataset(**params,subject_dict=test_dict,use_augmentations=False,train=False) \n \n print(\"number of train_subj:\", len(train_dict))\n print(\"number of val_subj:\", len(val_dict))\n print(\"number of test_subj:\", len(test_dict))\n print(\"length of train_idx:\", len(self.train_dataset.data))\n print(\"length of val_idx:\", len(self.val_dataset.data)) \n print(\"length of test_idx:\", len(self.test_dataset.data))\n \n # DistributedSampler is internally called in pl.Trainer\n def get_params(train):\n return {\n \"batch_size\": self.hparams.batch_size if train else self.hparams.eval_batch_size,\n \"num_workers\": self.hparams.num_workers,\n \"drop_last\": True,\n \"pin_memory\": False,\n \"persistent_workers\": False if self.hparams.dataset_name == 'Dummy' else (train and (self.hparams.strategy == 'ddp')),\n \"shuffle\": train\n }\n self.train_loader = DataLoader(self.train_dataset, **get_params(train=True))\n self.val_loader = DataLoader(self.val_dataset, **get_params(train=False))\n self.test_loader = DataLoader(self.test_dataset, **get_params(train=False))\n \n\n def train_dataloader(self):\n return self.train_loader\n\n def val_dataloader(self):\n # return self.val_loader\n # currently returns validation and test set to track them during training\n return [self.val_loader, self.test_loader]\n\n def test_dataloader(self):\n return self.test_loader\n\n def predict_dataloader(self):\n return self.test_dataloader()\n\n 
@classmethod\n def add_data_specific_args(cls, parent_parser: ArgumentParser, **kwargs) -> ArgumentParser:\n parser = ArgumentParser(parents=[parent_parser], add_help=True, formatter_class=ArgumentDefaultsHelpFormatter)\n group = parser.add_argument_group(\"DataModule arguments\")\n group.add_argument(\"--dataset_split_num\", type=int, default=1) # dataset split, choose from 1, 2, or 3\n group.add_argument(\"--label_scaling_method\", default=\"standardization\", choices=[\"minmax\",\"standardization\"], help=\"label normalization strategy for a regression task (mean and std are automatically calculated using train set)\")\n group.add_argument(\"--image_path\", default=None, help=\"path to image datasets preprocessed for SwiFT\")\n group.add_argument(\"--bad_subj_path\", default=None, help=\"path to txt file that contains subjects with bad fMRI quality\")\n group.add_argument(\"--input_type\", default=\"rest\",choices=['rest','task'],help='refer to datasets.py')\n group.add_argument(\"--train_split\", default=0.7, type=float)\n group.add_argument(\"--val_split\", default=0.15, type=float)\n group.add_argument(\"--batch_size\", type=int, default=4)\n group.add_argument(\"--eval_batch_size\", type=int, default=16)\n group.add_argument(\"--img_size\", nargs=\"+\", default=[96, 96, 96, 20], type=int, help=\"image size (adjust the fourth dimension according to your --sequence_length argument)\")\n group.add_argument(\"--sequence_length\", type=int, default=20)\n group.add_argument(\"--stride_between_seq\", type=int, default=1, help=\"skip some fMRI volumes between fMRI sub-sequences\")\n group.add_argument(\"--stride_within_seq\", type=int, default=1, help=\"skip some fMRI volumes within fMRI sub-sequences\")\n group.add_argument(\"--num_workers\", type=int, default=8)\n group.add_argument(\"--with_voxel_norm\", type=str2bool, default=False)\n group.add_argument(\"--shuffle_time_sequence\", action='store_true')\n group.add_argument(\"--limit_training_samples\", type=int, default=None, help=\"use if you want to limit training samples\")\n return parser" } ]
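The configure_optimizers snippet in the context above returns the optimizer together with a step-interval scheduler dict built around a custom CosineAnnealingWarmUpRestarts class. As a minimal sketch of that wiring only — PyTorch's built-in CosineAnnealingWarmRestarts stands in for the custom warmup variant (warmup steps and gamma decay omitted), and TinyClassifier is a hypothetical toy module, not part of the source — the pattern looks roughly like this:

import torch
import pytorch_lightning as pl


class TinyClassifier(pl.LightningModule):
    # Toy stand-in for the LitClassifier shown in the snippet (hypothetical module).
    def __init__(self, learning_rate=1e-3, weight_decay=0.01, use_scheduler=True):
        super().__init__()
        self.save_hyperparameters()
        self.layer = torch.nn.Linear(8, 2)

    def forward(self, x):
        return self.layer(x)

    def configure_optimizers(self):
        optim = torch.optim.AdamW(
            self.parameters(),
            lr=self.hparams.learning_rate,
            weight_decay=self.hparams.weight_decay,
        )
        if not self.hparams.use_scheduler:
            return optim
        # Step-interval scheduler dict, as in the snippet; the built-in
        # CosineAnnealingWarmRestarts replaces the custom warmup scheduler here.
        total_steps = int(self.trainer.estimated_stepping_batches)
        sche = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
            optim, T_0=max(1, int(0.3 * total_steps)), T_mult=1, eta_min=1e-9
        )
        scheduler = {"scheduler": sche, "name": "lr_history", "interval": "step"}
        return [optim], [scheduler]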
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import json
import numpy as np
import torchvision
import matplotlib.pyplot as plt
from PIL import Image
from tqdm import tqdm
from matplotlib.colors import LinearSegmentedColormap
from torchvision import models
from torchvision import transforms
from captum.attr import IntegratedGradients
from captum.attr import GradientShap
from captum.attr import Occlusion
from captum.attr import NoiseTunnel
from captum.attr import visualization as viz
from matplotlib.colors import LogNorm
from project.module.models.swin4d_transformer_ver7 import SwinTransformer4D
from project.module.pl_classifier import LitClassifier
from project.module.utils.data_module import fMRIDataModule
from pathlib import Path
12,978
save_dir = # write path to save_dir jobid = # write project number neptune_project_id = # write project id. ex)user_id/project_name for i in Path(f'SwiFT/output/{neptune_project_id}/RSTOT-{jobid}/').glob('checkpt*'): ckpt_path = i ckpt = torch.load(ckpt_path, map_location='cuda:0' if torch.cuda.is_available() else 'cpu') ckpt['hyper_parameters']['image_path'] = # write path to MNI_to_TRs folder ckpt['hyper_parameters']['default_root_dir'] = # write path to use default_root_dir ckpt['hyper_parameters']['shuffle_time_sequence'] = False ckpt['hyper_parameters']['time_as_channel'] = False ckpt['hyper_parameters']['eval_batch_size'] = 1 args = ckpt['hyper_parameters'] model = LitClassifier(**args) model.cuda(0) if torch.cuda.is_available() else model model.load_state_dict(ckpt['state_dict']) integrated_gradients = IntegratedGradients(model) noise_tunnel = NoiseTunnel(integrated_gradients) kwargs = { "nt_samples": 5, "nt_samples_batch_size": 5, "nt_type": "smoothgrad_sq", # 1 #"stdevs": 0.05, "internal_batch_size": 5, }
save_dir = # write path to save_dir jobid = # write project number neptune_project_id = # write project id. ex)user_id/project_name for i in Path(f'SwiFT/output/{neptune_project_id}/RSTOT-{jobid}/').glob('checkpt*'): ckpt_path = i ckpt = torch.load(ckpt_path, map_location='cuda:0' if torch.cuda.is_available() else 'cpu') ckpt['hyper_parameters']['image_path'] = # write path to MNI_to_TRs folder ckpt['hyper_parameters']['default_root_dir'] = # write path to use default_root_dir ckpt['hyper_parameters']['shuffle_time_sequence'] = False ckpt['hyper_parameters']['time_as_channel'] = False ckpt['hyper_parameters']['eval_batch_size'] = 1 args = ckpt['hyper_parameters'] model = LitClassifier(**args) model.cuda(0) if torch.cuda.is_available() else model model.load_state_dict(ckpt['state_dict']) integrated_gradients = IntegratedGradients(model) noise_tunnel = NoiseTunnel(integrated_gradients) kwargs = { "nt_samples": 5, "nt_samples_batch_size": 5, "nt_type": "smoothgrad_sq", # 1 #"stdevs": 0.05, "internal_batch_size": 5, }
data_module = fMRIDataModule(**args)
2
2023-10-28 09:26:03+00:00
16k
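Putting this record together: the script above loads a LitClassifier from a checkpoint and wraps it in IntegratedGradients plus NoiseTunnel, and the line that follows it, data_module = fMRIDataModule(**args), builds the data module from the same hyperparameters. A minimal end-to-end sketch could look like the following; the checkpoint path, the batch key name, and the target class index are assumptions, not taken from the record.

import torch
from captum.attr import IntegratedGradients, NoiseTunnel

from project.module.pl_classifier import LitClassifier
from project.module.utils.data_module import fMRIDataModule

# Hypothetical checkpoint path; the script above globs real checkpoints instead.
ckpt = torch.load("checkpoint.ckpt", map_location="cpu")
args = ckpt["hyper_parameters"]

model = LitClassifier(**args)
model.load_state_dict(ckpt["state_dict"])
model.eval()

data_module = fMRIDataModule(**args)  # the completion line shown above

ig = IntegratedGradients(model)
noise_tunnel = NoiseTunnel(ig)

# Assumptions: the test batch is a dict holding the fMRI tensor under
# "fmri_sequence", and class index 0 is the attribution target.
batch = next(iter(data_module.test_dataloader()))
inputs = batch["fmri_sequence"] if isinstance(batch, dict) else batch[0]

attributions = noise_tunnel.attribute(
    inputs,
    nt_samples=5,
    nt_samples_batch_size=5,
    nt_type="smoothgrad_sq",
    internal_batch_size=5,
    target=0,
)
print(tuple(attributions.shape))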
TheCompAce/ShellSpeak
main.py
[ { "identifier": "VectorDatabase", "path": "modules/vectorDatabase.py", "snippet": "class VectorDatabase:\n def __init__(self, path, name):\n self.path = path\n self.name = name\n self.db_path = os.path.join(path, f'{name}.db')\n self.model_path = os.path.join(path, f'{name}.bin')\n \n # Ensure the path exists\n if not os.path.exists(path):\n os.makedirs(path)\n \n # Set up database and model connections\n self.conn = self.initialize_db()\n self.model = self.initialize_model()\n \n def initialize_db(self):\n try:\n conn = sqlite3.connect(self.db_path)\n c = conn.cursor()\n \n c.execute('CREATE TABLE IF NOT EXISTS responses (id INTEGER PRIMARY KEY, response TEXT, response_raw TEXT, trained BOOLEAN DEFAULT 0)')\n c.execute('CREATE INDEX IF NOT EXISTS idx_responses_trained ON responses (trained)') # Index on trained field\n c.execute('CREATE TABLE IF NOT EXISTS vector_data (id INTEGER PRIMARY KEY, vector BLOB, response_id INTEGER, FOREIGN KEY(response_id) REFERENCES responses(id))')\n c.execute('CREATE INDEX IF NOT EXISTS idx_vector_data_response_id ON vector_data (response_id)') # Index on response_id field\n\n # Check if timestamp column exists\n c.execute(\"PRAGMA table_info(responses)\")\n columns = [column[1] for column in c.fetchall()]\n if 'timestamp' not in columns:\n c.execute('ALTER TABLE responses ADD COLUMN timestamp DATETIME DEFAULT CURRENT_TIMESTAMP')\n \n conn.commit()\n\n return conn # Return the connection\n except Exception as e:\n logging.exception(f\"An error occurred in initialize_db: {e}\")\n\n \n def initialize_model(self):\n try:\n # Create a new Word2Vec model if it doesn't exist\n if not os.path.exists(self.model_path):\n # Assuming sentences is your data\n # Replace the following line with your data and model parameters\n sentences = [[\"hello\", \"world\"], [\"how\", \"are\", \"you\"], [\"goodbye\", \"world\"]]\n\n model = Word2Vec(sentences, min_count=1)\n model.save(self.model_path)\n else:\n model = Word2Vec.load(self.model_path)\n\n return model # Return the model\n except Exception as e:\n logging.exception(f\"An error occurred in initialize_model: {e}\")\n\n def store_short_term_memory(self, task_id, data):\n # Convert data to a string or JSON format\n data_str = json.dumps(data)\n # Store the data as a response in the VectorDatabase\n self.vector_db.create_response(data_str)\n\n def store_long_term_memory(self, task_data):\n # Convert task_data to a string or JSON format\n task_data_str = json.dumps(task_data)\n # Store the task_data as a response in the VectorDatabase\n self.create_response(task_data_str)\n\n def ensure_connection(self):\n if self.conn is None:\n self.conn = self.initialize_db()\n if self.model is None:\n self.model = self.initialize_model()\n \n def create_response(self, response_text):\n try:\n c = self.conn.cursor()\n \n preprocess_text = self.preprocess_text(response_text)\n now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n c.execute('INSERT INTO responses (response, response_raw, timestamp) VALUES (?, ?, ?)', (preprocess_text, response_text, now))\n response_id = c.lastrowid\n\n # Updated word check in vocabulary\n words = preprocess_text.split()\n vectors = [self.model.wv[word] for word in words if word in dict(self.model.wv.key_to_index)]\n if vectors:\n vector = np.mean(vectors, axis=0) # Averaging vectors of the words\n vector_bytes = vector.tobytes()\n c.execute('INSERT INTO vector_data (vector, response_id) VALUES (?, ?)', (vector_bytes, response_id))\n else:\n logging.info(\"No valid words found in the response for 
vectorization.\")\n\n self.conn.commit()\n c.close()\n except Exception as e:\n logging.exception(f\"An error occurred in create_response: {e}\")\n\n \n def search_response(self, search_text):\n c = self.conn.cursor()\n \n # Use the LIKE operator to search for the search_text in the response field\n c.execute(\"SELECT id, response FROM responses WHERE response LIKE ?\", ('%' + search_text + '%',))\n search_results = c.fetchall()\n c.close()\n return search_results\n \n def normalize_text(self, text):\n # Convert to lowercase\n text = text.lower()\n # Replace newline characters with spaces\n text = text.replace('\\\\n', ' ')\n # Remove special characters and digits using regex\n text = re.sub(r'[^a-zA-Z\\s]', ' ', text)\n # Remove extra whitespaces\n text = re.sub(r'\\s+', ' ', text).strip()\n # Tokenize the text\n tokens = text.split()\n # Remove stopwords\n tokens = [word for word in tokens if word not in stopwords.words('english')]\n # Perform stemming\n stemmer = PorterStemmer()\n tokens = [stemmer.stem(word) for word in tokens]\n # Join tokens back into a single string\n text = ' '.join(tokens)\n return text\n\n def close_connection(self):\n \"\"\"Close the database connection gracefully.\"\"\"\n try:\n if self.conn:\n self.conn.close()\n\n self.conn = None\n except Exception as e:\n logging.exception(f\"An error occurred while closing the connection: {e}\")\n\n\n def preprocess_text(self, text):\n \"\"\"Example preprocessing function (can be expanded).\"\"\"\n # Placeholder for any preprocessing steps you want to implement\n return self.normalize_text(text)\n\n def get_vector(self, response_id):\n \"\"\"Retrieve vector data for a given response_id.\"\"\"\n c = self.conn.cursor()\n \n c.execute('SELECT vector FROM vector_data WHERE response_id = ?', (response_id,))\n vector_data = c.fetchone()\n c.close()\n \n if vector_data is None:\n error_message = f\"No vector data found for response_id {response_id}\"\n logging.error(error_message)\n raise ValueError(error_message)\n \n vector = np.frombuffer(vector_data[0], dtype=np.float32) # Assuming the vector data is stored as float32\n \n return vector\n\n \n def read_response(self, response_id):\n c = self.conn.cursor()\n \n c.execute('SELECT response FROM responses WHERE id = ?', (response_id,))\n response = c.fetchone()\n\n c.close()\n \n if response is None:\n error_message = f\"No response found for response_id {response_id}\"\n logging.error(error_message)\n raise ValueError(error_message)\n \n return response[0]\n \n def update_response(self, response_id, new_response_text):\n try:\n c = self.conn.cursor()\n \n normalized_text = self.preprocess(new_response_text)\n c.execute('UPDATE responses SET response = ? WHERE id = ?', (normalized_text, response_id))\n \n # Check if each word is in the model's vocabulary\n words = normalized_text.split()\n vectors = [self.model.wv[word] for word in words if word in dict(self.model.wv.key_to_index)]\n if vectors:\n vector = np.mean(vectors, axis=0) # Averaging vectors of the words\n vector_bytes = vector.tobytes()\n c.execute('UPDATE vector_data SET vector = ? 
WHERE response_id = ?', (vector_bytes, response_id))\n \n self.conn.commit()\n c.close()\n except Exception as e:\n logging.exception(f\"An error occurred in update_response: {e}\")\n \n def delete_response(self, response_id):\n try:\n c = self.conn.cursor()\n \n c.execute('DELETE FROM vector_data WHERE response_id = ?', (response_id,))\n c.execute('DELETE FROM responses WHERE id = ?', (response_id,))\n \n self.conn.commit()\n\n c.close()\n except Exception as e:\n logging.exception(f\"An error occurred in delete_response: {e}\")\n\n def train_untrained_responses(self):\n try:\n c = self.conn.cursor()\n\n c.execute(\"SELECT response FROM responses WHERE trained = 0\")\n untrained_responses = c.fetchall()\n if untrained_responses:\n sentences = [response[0].split() for response in untrained_responses]\n\n self.model.build_vocab(sentences, update=True)\n self.model.train(sentences, total_examples=len(sentences), epochs=self.model.epochs)\n\n self.model.save(self.model_path)\n\n c.execute(\"UPDATE responses SET trained = 1 WHERE trained = 0\")\n\n self.conn.commit()\n else:\n logging.info(\"No untrained responses found.\")\n\n c.close()\n except Exception as e:\n logging.exception(f\"An error occurred in train_untrained_responses: {e}\")\n\n def needs_index_update(self):\n \"\"\"\n Check if there are any untrained responses in the database.\n If there are, it means the index needs to be updated.\n Returns True if update is needed, False otherwise.\n \"\"\"\n try:\n c = self.conn.cursor()\n c.execute(\"SELECT COUNT(*) FROM responses WHERE trained = 0\")\n count = c.fetchone()[0]\n c.close() # Manually close the cursor\n return count > 0\n except Exception as e:\n logging.exception(f\"An error occurred in needs_index_update: {e}\")\n return False # In case of an error, you might want to handle it differently\n\n \n def reset_training_status(self):\n \"\"\"Reset the trained status of all responses to untrained.\"\"\"\n try:\n c = self.conn.cursor()\n \n c.execute(\"UPDATE responses SET trained = 0\")\n \n self.conn.commit()\n\n c.close()\n except Exception as e:\n logging.exception(f\"An error occurred in reset_training_status: {e}\")\n\n\n def search_word_vector(self, word):\n try:\n if word in self.model.wv.key_to_index:\n similar_words = self.model.wv.similar_by_word(word)\n return similar_words\n else:\n logging.error(f\"The word {word} is not in the model's vocabulary.\")\n return []\n except Exception as e:\n logging.exception(f\"An error occurred in search_word_vector: {e}\")\n return []\n\n def get_vector_average(self, text):\n words = text.split()\n vectors = [self.model.wv[word] for word in words if word in dict(self.model.wv.key_to_index)]\n if vectors:\n vector_avg = np.mean(vectors, axis=0)\n return vector_avg\n else:\n return np.zeros(self.model.vector_size)\n\n def search_similar_conversations(self, text, top_n=1):\n processed_text = self.preprocess_text(text)\n print(f\"processed_text = {processed_text}\")\n\n query_vector = self.get_vector_average(processed_text)\n with self.conn:\n c = self.conn.cursor()\n c.execute('SELECT id, vector FROM vector_data')\n vector_data = c.fetchall()\n\n if not vector_data:\n return []\n\n ids, vectors = zip(*vector_data)\n vectors = np.array([np.frombuffer(vector, dtype=np.float32) for vector in vectors])\n similarities = cosine_similarity([query_vector], vectors)[0]\n sorted_indices = np.argsort(similarities)[::-1]\n top_indices = sorted_indices[:top_n]\n top_ids = [ids[i] for i in top_indices]\n top_similarities = [similarities[i] for i in 
top_indices]\n\n result = []\n for response_id, similarity in zip(top_ids, top_similarities):\n # Fetch the corresponding response text for each response_id\n c.execute('SELECT response_raw FROM response_raw WHERE id = ?', (response_id,))\n response_text = c.fetchone()\n if response_text is not None:\n response_text = response_text[0] # Extracting text from the tuple\n # result.append((response_id, response_text, similarity))\n result.append(response_text)\n\n return result" }, { "identifier": "save_settings", "path": "modules/menus/setup_menu.py", "snippet": "def setup_menu():" }, { "identifier": "ShellSpeak", "path": "modules/shellSpeak.py", "snippet": "class ShellSpeak:\n def __init__(self, settings, base_path, vectorDb):\n self.llm_len = int(settings.get(\"llm_size\", 14000))\n self.llm_history_len = int(settings.get(\"llm_history_size\", 4000))\n self.llm_file_len = int(settings.get(\"llm_file_size\", 4000))\n self.llm_folder_len = int(settings.get(\"llm_folder_size\", 4000))\n self.llm_slide_len = int(settings.get(\"llm_slide_len\", 120))\n\n self.temp_file = settings.get(\"temp_file\", \"temp\")\n\n self.llm_output_size = int(settings.get(\"llm_output_size\", 4097))\n self.use_cache = settings.get(\"use_cache\", False)\n self.cache_file = settings.get(\"cache_file\", None)\n\n self.vector_for_commands = settings.get(\"vector_for_commands\", False)\n self.vector_for_history = settings.get(\"vector_for_history\", True)\n self.vector_for_folders = settings.get(\"vector_for_folders\", True)\n\n self.data_file = 'path_to_your_data_file.json'\n self.use_indexing = settings.get('use_indexing', False)\n\n self.vector_db = vectorDb\n\n self.settings = settings\n self.command_history = \"\"\n self.settingsRoot = base_path\n\n self.files = []\n\n self.llm = LLM(model_type=ModelTypes(self.settings.get('model', \"OpenAI\")), use_cache=self.use_cache, cache_file=self.cache_file) #Zephyr7bBeta\n\n self.command_runner = CommandRunner(self)\n\n logging.info(f\"Shell Speak Loaded\")\n\n def capture_input(self):\n # Get current working directory\n current_directory = os.getcwd()\n \n # Get environment (if available)\n environment = os.environ.get('VIRTUAL_ENV', None)\n if environment:\n environment = os.path.basename(environment) # Extracting last part of the path as environment name\n \n # Formatted prompt\n prompt = f\"[green]({environment})[cyan] {current_directory}[white]>\" if environment else f\"{current_directory}{self.settings['command_prompt']}\"\n \n set_input = capture_styled_input(prompt)\n logging.info(f\"Using input : {set_input}\")\n return set_input\n \n def show_file(self, caption, body):\n print_colored_text(f\"[yellow]==== {caption} ====\")\n num_width = len(str(len(body)))\n for line_number, line in enumerate(body, 1): # Start counting from 1\n print_colored_text(f'[yellow]{line_number:{num_width}}:[cyan] {line}') # Adjust the format as needed\n print_colored_text(\"[yellow]====================\")\n\n\n def detect_language(self, code):\n try:\n lexer = lexers.guess_lexer(code)\n return lexer.name\n except lexers.ClassNotFound:\n return None\n \n async def execute_python_script(self, python_section, filename):\n lines = python_section.split('\\n')\n if len(lines) == 1:\n # Single-line script, execute directly\n script = lines[0]\n # script = f\"{self.settings['python_command_prompt']}\\n{script}\"\n output = await self.run_python_script(script)\n return output\n else:\n # Multi-line script, create a python file\n python_filename = f'{self.temp_file}.py'\n if filename:\n # Use 
commented out filename\n check_filename = filename\n \n if (is_valid_filename(check_filename)):\n python_filename = filename\n\n script = '\\n'.join(lines)\n script = f\"{self.settings['python_command_prompt']}\\n{script}\"\n\n with open(python_filename, 'w') as python_file:\n python_file.write(script)\n\n self.show_file(\"Python File\", script.split('\\n'))\n user_confirmation = capture_styled_input(\"[yellow]Are you sure you want to run this Python script? (yes/no): \")\n if user_confirmation.lower() != 'yes':\n if python_filename == f'{self.temp_file}.py':\n os.remove(python_filename) # Remove temporary python file\n return CommandResult(\"\", \"Run python file Canceled.\")\n \n output = await self.run_python_script(python_filename)\n if python_filename == f'{self.temp_file}.py':\n os.remove(python_filename) # Remove temporary python file\n return output\n \n async def run_python_script(self, script):\n # If the script is a file, use 'python filename.py' to execute\n if script.endswith('.py'):\n command = f'python -u {script}'\n else:\n command = f'python -u -c \"{script}\"'\n result = await self.run_command(command)\n return CommandResult(result.out, result.err)\n \n def extract_script_command(self, script_type, text):\n match = re.search(rf'```{script_type}(.*?)```', text, re.DOTALL)\n if match:\n shell_section = match.group(1).strip()\n else:\n logging.error(f\"No {script_type} section found\")\n shell_section = None\n\n return shell_section\n\n \n \n\n async def execute_shell_section(self, shell_section, filename):\n\n logging.info(f\"Executing Shell Section : {shell_section}\")\n\n shell_section.strip()\n\n lines = shell_section.split('\\n')\n ret_value = CommandResult(\"\", \"\")\n \n if len(lines) == 1:\n # Single-line command, execute directly\n command = lines[0]\n\n ret_value = await self.run_command(command)\n logging.error(f\"Execute Shell Directory Line Strip: {ret_value}\")\n\n else:\n # Multi-line command, create a batch file\n batch_filename = f'{self.temp_file}.bat'\n if lines[0].startswith('REM '):\n # Use commented out filename\n batch_filename = lines[0][4:].strip()\n # lines = lines[1:] # Remove the filename line\n\n logging.info(f\"batch_filename : {batch_filename}\")\n with open(batch_filename, 'w') as batch_file:\n batch_file.write('\\n'.join(lines))\n self.show_file(\"Batch File\", lines)\n user_confirmation = capture_styled_input(\"[yellow]Are you sure you want to run this batch file? 
(yes/no): \")\n logging.info(f\"user_confirmation : {user_confirmation}\")\n if user_confirmation.lower() != 'yes':\n return CommandResult(\"\", \"Run batch file Canceled.\")\n ret_value = await self.run_command(batch_filename)\n \n logging.info(f\"command output : out: {ret_value.out}, err: {ret_value.err}\")\n if batch_filename == f'{self.temp_file}.bat':\n os.remove(batch_filename) # Remove temporary batch file\n logging.info(f\"removing : {batch_filename}\")\n\n return ret_value\n \n def create_process_group(self):\n # Create a new process group\n process_group_id = os.set_handle_inheritance(0, 1)\n return process_group_id\n\n async def run_command(self, command):\n command += \" && cd\"\n logging.info(f\"run command : {command}\")\n\n stdout, stderr = await self.command_runner.run(command)\n\n \n\n if stderr == \"\":\n lines = stdout.strip().split(\"\\n\")\n if lines:\n new_dir = lines[-1] # Assuming the last line of output contains the new working directory\n if os.path.isdir(new_dir):\n os.chdir(new_dir) # Change to the new working directory in your parent process\n # Remove the last line containing the new directory from the output\n lines = lines[:-1]\n stdout = '\\n'.join(lines)\n else:\n logging.error(f\"Invalid directory: {new_dir}\")\n else:\n logging.error(\"No output to determine the new working directory\")\n\n if stdout.find(\"Traceback (most recent call last):\") > -1:\n stderr = stdout\n stdout = command\n else:\n stderr = f\"Command : {command}, Error: {stderr}\"\n\n logging.info(f\"run return : out: {stdout}, err: {stderr}\")\n\n ret_val = CommandResult(stdout, stderr)\n return ret_val\n \n \n def format_for_display(self, input, output):\n timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n self.command_history += f\"History: [Time: {timestamp}\\nInput: {input}\\nOutput: {output}]\\n\"\n self.display_output(output)\n\n\n def shrink_file_data(self, file_data, target_tokens):\n # Get the current token count of file_data\n current_tokens = get_token_count(file_data)\n\n if current_tokens > target_tokens:\n # Estimate the number of characters to keep based on the average token length\n average_token_length = len(file_data) / current_tokens\n chars_to_keep = int(target_tokens * average_token_length)\n \n # Only keep the last part of file_data\n truncated_data = file_data[-chars_to_keep:]\n return truncated_data\n\n # If the file_data is already within the limit, return it as is\n return file_data\n\n\n def find_relevant_data(file_data, target_tokens):\n # Your logic here to find relevant information within the token count\n return file_data[:target_tokens]\n\n def expand_directories(self, file_paths, exclusions):\n new_file_list = []\n for file_path in file_paths:\n if os.path.isdir(file_path):\n # If the path is a directory, ask the user whether to include its files\n user_decision = input(f\"The path '{file_path}' is a directory. Do you want to add all files in this directory? 
(y/n): \")\n if user_decision.lower() == 'y':\n # If yes, walk through the directory and add all files\n for root, dirs, files in os.walk(file_path):\n # Remove excluded directories so os.walk doesn't traverse them\n dirs[:] = [d for d in dirs if d not in exclusions]\n for name in files:\n if name not in exclusions:\n new_file_list.append(os.path.join(root, name))\n else:\n # If no, inform the user that the directory is being skipped\n print_colored_text(f\"[blue]Skipping directory '{file_path}'.\")\n else:\n # If the path is a file, just add it to the list\n if os.path.basename(file_path) not in exclusions:\n new_file_list.append(file_path)\n return new_file_list\n\n\n def string_sizer(self, data, context, length=1024, use_vector=True):\n set_data = data.strip()\n token_count = get_token_count(set_data)\n print(f\"token_count = {token_count}\")\n if token_count > length:\n if use_vector:\n relevant_segments = self.vector_db.search_similar_conversations(context, top_n=length)\n # relevant_segments = find_relevant_file_segments(\n # history_text= context,\n # file_data=set_data,\n # window_size=length, # or any other size you deem appropriate (8124)\n # overlap=self.llm_slide_len, # or any other overlap size you deem appropriate\n # top_k=1 # or any other number of segments you deem appropriate\n # )\n # set_data = '\\n'.join([f\"[{item[0]}, {item[1]}, {item[2]}]\" for item in relevant_segments])\n\n set_data = '/n.../n'.join(relevant_segments)\n else:\n set_data = trim_to_right_token_count(set_data, len)\n \n data_tokens = get_token_count(set_data)\n logging.info(f\"Translate to Command History Token Count : {data_tokens}\")\n return data_tokens, set_data\n\n async def translate_to_command(self, user_input):\n user_command_prompt = self.settings['user_command_prompt']\n send_prompt = self.settings['command_prompt']\n max_llm = (self.llm_len - 80) #80 is used to pad json formatting of System Messages and over all prompt size.\n \n max_llm -= get_token_count(send_prompt)\n max_llm -= get_token_count(user_input)\n \n history_tokens, command_history = self.string_sizer(self.command_history, user_input, self.llm_history_len, self.vector_for_history)\n command_history = json.dumps(command_history)\n max_llm -= history_tokens\n\n # Add get folders/Files\n current_directory = os.getcwd()\n folder_list = list_files_and_folders_with_sizes(current_directory)\n folder_list = {\n \"path\": current_directory,\n \"folder_list\": folder_list\n }\n folder_list = json.dumps(folder_list)\n folder_list_tokens, folder_list = self.string_sizer(folder_list, command_history + \"/n\" + user_input, self.llm_folder_len, self.vector_for_commands)\n folder_list = json.dumps(folder_list)\n max_llm -= folder_list_tokens\n\n set_command_files_data = []\n total_tokens = 0\n\n # Extract file paths and exclusion list from user_input\n file_paths = re.findall(r'file:\\s*(\".*?\"|\\S+)', user_input)\n \n # Remove quotes from file paths, if present\n self.files = [fp.strip('\"') for fp in file_paths]\n for f, file in enumerate(self.files):\n exclusions = file.split(',')\n file_path = exclusions[0]\n\n exclusions.pop(0)\n self.files[f] = file_path\n self.exclusions = exclusions\n self.files = self.expand_directories(self.files, self.exclusions)\n\n # Use the new function to expand directories into file lists\n self.files = self.expand_directories(self.files, self.exclusions)\n\n if len(self.files) > 0:\n total_size = 0\n total_data = \"\"\n files_data = []\n \n for file in self.files:\n file_data_content = read_file(file) # 
Note: Changed to 'file_data_content'\n if len(file_data_content) > 50000: #Cap for NLP = 1000000\n # Prompt the user for a decision\n include_file = input(f\"The file {file} is very large. Do you want to include it? (yes/no): \")\n if include_file.lower() != 'yes' or include_file.lower() != 'y':\n print_colored_text(f\"[yellow]Skipping file: {file}\")\n continue # Skip the rest of the loop and therefore the file\n\n\n file_data = {\n \"file\": file,\n \"file_data\": file_data_content,\n \"file_size\": int(get_file_size(file)),\n \"file_tokens\": get_token_count(file_data_content) # Note: Changed to 'file_data_content'\n }\n \n total_size += file_data[\"file_size\"]\n total_data += file_data[\"file_data\"]\n\n files_data.append(file_data)\n\n # Sort files_data by file_tokens in descending order\n files_data = sorted(files_data, key=lambda x: x['file_tokens'], reverse=True)\n\n remaining_tokens = self.llm_file_len\n remaining_tokens_split = int(remaining_tokens / len(files_data)) + 1\n new_files_data = []\n for f, file in enumerate(files_data):\n if file[\"file_tokens\"] > remaining_tokens_split:\n file[\"fileIndex\"] = f\n file[\"file_tokens\"] = remaining_tokens_split\n new_files_data.append(file)\n else:\n remaining_tokens -= file[\"file_tokens\"]\n div_val = (len(files_data) - (len(files_data) - len(new_files_data)))\n if div_val == 0:\n div_val = 1\n\n remaining_tokens_split = int(remaining_tokens / div_val)\n \n if len(new_files_data) > 0:\n for new_file in new_files_data:\n print_colored_text(f\"[cyan]File {new_file['file']} Trimming\")\n relevant_segments = self.vector_db.search_similar_conversations(new_file['file_data'])\n # relevant_segments = find_relevant_file_segments(\n # history_text=folder_list + \"\\n\" + command_history + \"\\n\"+ user_input,\n # file_data=new_file['file_data'],\n # window_size=new_file['file_tokens'], # or any other size you deem appropriate (8124)\n # overlap=self.llm_slide_len, # or any other overlap size you deem appropriate\n # top_k=1 # or any other number of segments you deem appropriate\n # )\n new_file['file_data'] = '/n.../n'.join(relevant_segments)\n file_data_content = new_file['file_data']\n \n new_file['file_tokens'] = get_token_count(file_data_content)\n\n files_data[new_file[\"fileIndex\"]] = new_file\n\n total_tokens = 0\n for file_data in files_data:\n total_tokens += file_data[\"file_tokens\"]\n\n # Check if the file_data is binary and encode it with base64 if so\n try:\n # This will work if 'file_data' is text\n encoded_data = json.dumps(file_data['file_data'])\n except TypeError:\n # If 'file_data' is binary, encode it with base64\n encoded_data = base64.b64encode(file_data['file_data']).decode('utf-8')\n\n add_command_files_data = {\n \"file:\": file_data[\"file\"],\n \"data:\": encoded_data\n }\n\n set_command_files_data.append(add_command_files_data)\n \n\n command_files_data = json.dumps(set_command_files_data)\n logging.info(f\"Translate to Command File Token Count : {total_tokens}\")\n\n max_llm -= total_tokens\n\n commands = map_possible_commands()\n command_tokens, commands = self.string_sizer(commands, command_files_data + \"\\n\" + folder_list + \"\\n\" + command_history + \"\\n\"+ user_input, max_llm, self.vector_for_commands)\n \n command_tokens = get_token_count(commands)\n logging.info(f\"Translate to Command Commands Token Count : {command_tokens}\")\n \n logging.info(f\"Translate to Command : {user_input}\")\n\n kwargs = {\n 'user_prompt': user_input,\n 'get_os_name': get_os_name(),\n 'commands': commands,\n 
'command_history': command_history,\n 'command_files_data': command_files_data,\n 'current_folders_data': folder_list\n }\n user_command_prompt = replace_placeholders(user_command_prompt, **kwargs)\n system_command_prompt = replace_placeholders(send_prompt, **kwargs)\n\n user_tokens = get_token_count(user_command_prompt)\n system_tokens = get_token_count(system_command_prompt)\n logging.info(f\"Translate to Command User Token Count : {user_tokens}\")\n logging.info(f\"Translate to Command System Token Count : {system_tokens}\")\n\n logging.info(f\"Translate to Command use System Prompt : {system_command_prompt}\")\n logging.info(f\"Translate to Command use User Prompt : {user_command_prompt}\")\n # command_output = self.llm.ask(system_command_prompt, user_command_prompt, model_type=ModelTypes(self.settings.get('model', \"OpenAI\")), return_type=\"json_object\")\n # loop = asyncio.get_event_loop()\n # command_output = await loop.run_in_executor(None, lambda: self.llm.ask(system_command_prompt, user_command_prompt, model_type=ModelTypes(self.settings.get('model', \"OpenAI\"))))\n command_output = await self.llm.async_ask(system_command_prompt, user_command_prompt, model_type=ModelTypes(self.settings.get('model', \"OpenAI\")), return_type=\"json_object\")\n # save_history_data(user_command_prompt, f\"User : {system_command_prompt}\", self.settings)\n self.vector_db.store_long_term_memory(f\"System : {system_command_prompt}\\n User : {user_command_prompt}\")\n logging.info(f\"Translate to Command return Response : {command_output}\")\n\n display_content = \"\"\n display_error = None\n try:\n if not isinstance(command_output, str):\n # Convert non-string command_output to a JSON-formatted string\n command_output_obj = {\n \"type\": \"Unknown\",\n \"Content\": f\"{command_output}\"\n }\n try:\n command_output_obj = json.loads(command_output)\n except json.JSONDecodeError as e:\n # Handle JSON decoding error if it occurs\n # You might want to log this error or handle it as per your application's needs\n command_output_obj = {\"type\": \"Error\", \"content\": str(e)}\n\n\n logging.info(f\"Translate return Response : {command_output}\")\n type = command_output_obj[\"type\"]\n content = command_output_obj.get(\"content\", None)\n err = content.get(\"error\", None)\n\n if not err:\n if type == \"command_execution\":\n command = content[\"command\"]\n if len(command) > 6 and command[:6] == \"python\":\n while True:\n run_as_mod = capture_styled_input(\"[yellow]Do you want to add our compatibility code? 
(yes/no/exit) :\")\n run_as_code = False\n cancel_run = False\n if run_as_mod == \"yes\" or run_as_mod == \"y\":\n run_as_code = True\n break\n elif run_as_mod == \"no\" or run_as_mod == \"n\":\n run_as_code = False\n break\n elif run_as_mod == \"exit\":\n cancel_run = True\n break\n else:\n print_colored_text(\"[red]Invalid Input!\")\n\n if not cancel_run:\n if run_as_code:\n # Extract the Python script or module name from the command\n command_parts = command_output.split()\n script_name = None\n for i, part in enumerate(command_parts):\n if part.endswith(\".py\"):\n script_name = part\n break\n elif part == \"-m\" and i < len(command_parts) - 1:\n script_name = command_parts[i + 1] + \".py\" # Assuming the module name is a Python file name\n break\n\n # Open and read the script if the name is found\n if script_name:\n try:\n with open(script_name, 'r') as file:\n python_code = file.read()\n\n\n # Now, python_code contains the content of the Python file\n # You can now pass this code to execute_python_script function\n display_content = await self.execute_python_script(python_code)\n\n except FileNotFoundError:\n print_colored_text(f\"[red]Error: The file {script_name} was not found.\")\n logging.info(f\"Translate Command Error: The file {script_name} was not found.\")\n except Exception as e:\n print_colored_text(f\"[red]Error: An error occurred while reading the file {script_name}: {e}\")\n logging.info(f\"Translate Command Error: An error occurred while reading the file {script_name}: {e}\")\n else:\n print_colored_text(\"[red]Error: No Python script name could be extracted from the command.\")\n logging.info(f\"Translate Command Error: No Python script name could be extracted from the command.\")\n else:\n success, command_output = await self.execute_command(command_output)\n if not success:\n print_colored_text(f\"[red]Exe Error: {command_output.err}\")\n display_content = command_output.err\n else:\n display_content = command_output.out\n logging.info(f\"Translate Command Execute : {command_output}\")\n else:\n logging.info(f\"Translate Command Canceled : {command_output}\")\n else:\n success, command_output = await self.execute_command(command)\n if not success and command_output.err.strip() != \"\":\n print_colored_text(f\"[red]Exe Error: {command_output.err}\")\n display_content = command_output.err\n else:\n display_content = command_output.out\n logging.info(f\"Translate Command Execute : {display_content}\")\n pass\n elif type == \"script_creation\":\n script_text = content['script']\n script_type = content['script_type']\n script_filename = content.get('script_filename', None)\n\n if script_type == \"shell\" or script_type == \"batch\" or script_type == \"bash\":\n display_content = await self.execute_shell_section(script_text, script_filename)\n elif script_type == \"python\":\n display_content = await self.execute_python_script(script_text, script_filename)\n else:\n display_content = CommandResult(script_text, f\"Invalid Script Type : {script_type}\")\n\n if command_output.err != \"\":\n print_colored_text(f\"[red]Shell Error: {command_output.err} with {command_output.out}\")\n display_content = command_output.err\n else: \n display_content = command_output.out\n\n logging.info(f\"Translate Shell Execute : {command_output}\")\n elif type == \"response_formatting\":\n display_content = content[\"text\"]\n elif type == \"error_handling\":\n display_content = content[\"type\"]\n display_error = err\n else:\n display_content = command_output\n display_error = f\"Invalid 
command type '{type}'.\"\n else:\n display_content = command_output\n display_error = err\n logging.info(f\"Translate to Command Object Error : {err}, command_output= {command_output}\")\n\n\n except Exception as e:\n display_content = command_output\n display_error = e\n logging.info(f\"Translate to Command Object Error : {e}, command_output= {command_output}\")\n\n logging.info(f\"Translate to Command Display Content : {display_content}\")\n\n if display_error:\n return display_error\n \n return display_content\n \n def check_script(self, code_type, text):\n command_output = text\n if f'```{code_type}' in text:\n command_output = self.extract_script_command(code_type, text)\n logging.info(f\"Translate '{code_type}' Code : {text}\")\n\n return command_output\n\n async def execute_command(self, command):\n try:\n logging.info(f\"Execute Command : {command}\")\n result = await self.run_command(command)\n if result.err:\n logging.info(f\"Execute Error : {result.err}\")\n return False, result\n \n logging.info(f\"Execute Output : {result.out}\")\n\n return True, result\n except Exception as e:\n return False, CommandResult(\"\", str(e))\n\n def translate_output(self, output, is_internal=False):\n logging.info(f\"Translate Output : {output}\")\n send_prompt = self.settings['display_prompt']\n\n total_tokens = self.llm_output_size - (get_token_count(send_prompt) + get_token_count(output) + 80)\n\n set_command_history = self.command_history\n token_count = get_token_count(set_command_history)\n\n if token_count > total_tokens:\n set_command_history = trim_to_right_token_count(set_command_history, total_tokens)\n\n max_llm = (self.llm_len - 80) #80 is used to padd json formatting of System Messages and over all prompt size.\n \n max_llm -= get_token_count(send_prompt)\n max_llm -= get_token_count(output)\n \n history_tokens, command_history = self.string_sizer(self.command_history, output, self.llm_history_len)\n command_history = json.dumps(command_history)\n max_llm -= history_tokens\n\n # Add get folders/Files\n current_directory = os.getcwd()\n folder_list = list_files_and_folders_with_sizes(current_directory)\n folder_list = {\n \"path\": current_directory,\n \"folder_list\": folder_list\n }\n folder_list = json.dumps(folder_list)\n folder_list_tokens, folder_list = self.string_sizer(folder_list, self.command_history + \"/n\" + output, self.llm_folder_len)\n folder_list = json.dumps(folder_list)\n max_llm -= folder_list_tokens\n\n kwargs = {\n 'get_os_name': get_os_name(),\n 'command_history': set_command_history,\n 'internal_script': str(is_internal)\n }\n send_prompt = replace_placeholders(send_prompt, **kwargs)\n\n logging.info(f\"Translate Output Display System Prompt : {send_prompt}\")\n logging.info(f\"Translate Output Display User Prompt : {output}\")\n display_output = self.llm.ask(send_prompt, output, model_type=ModelTypes(self.settings.get('model', \"OpenAI\")), return_type=\"text\")\n # save_history_data(output, f\"Assistant : {send_prompt}\", self.settings)\n self.vector_db.store_long_term_memory(f\"System : {send_prompt}\\n User : {output}\")\n\n logging.info(f\"Translate Output Display Response : {display_output}\")\n return display_output\n\n def display_output(self, output):\n logging.info(f\"Display Output : {output}\")\n print_colored_text(output)\n\n def display_about(self):\n print_colored_text(\"[bold][yellow]======================================================\\nShellSpeak\\n======================================================\\n[white]AI powered Console 
Input\\nVisit: https://github.com/TheCompAce/ShellSpeak\\nDonate: @BradfordBrooks79 on Venmo\\n\\n[grey]Type 'help' for Help.\\n[yellow]======================================================\\n\")\n\n def display_help(self):\n print_colored_text(\"[bold][yellow]======================================================\\nShellSpeak Help\\n======================================================\\n[white]Type:\\n'exit': to close ShellSpeak\\n'user: /command/': pass a raw command to execute then reply threw the AI\\n'file: /filepath/': adds file data to the command prompt. (use can send a folder path, using ',' to exclude folders and files.)\\n'clm': Clear command Memory\\n'rset': Reloads the settings file (this happens on every loading of the prompt.)\\n'about': Shows the About Information\\n'help': Shows this Help information.\\n[yellow]======================================================\\n\")\n\n async def run(self):\n self.display_about()\n while True:\n self.settings = load_settings(self.settingsRoot)\n self.files = []\n\n user_input = self.capture_input()\n if user_input.lower() == 'exit':\n break\n elif user_input.lower() == 'about':\n self.display_about()\n elif user_input.lower() == 'help':\n self.display_help()\n elif user_input.lower() == 'rset':\n self.display_output(f\"Settings Updated.\")\n elif user_input.lower() == 'rset':\n self.display_output(f\"Settings Updated.\")\n elif user_input.lower() == 'clm':\n self.command_history = \"\"\n # self.command_history += f\"Command Input: {user_input}\\nCommand Output: Command History cleared.\\n\"\n self.display_output(f\"Command Memory (History) Cleared.\")\n else:\n timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n if user_input.lower().startswith('user: '):\n # Bypass AI translation and send raw command to the OS\n raw_command = user_input[6:] # Extract the command part from user_input\n try:\n result = await self.run_command(raw_command)\n except Exception as e:\n translated_command = e\n translated_output = self.translate_output(result.out)\n self.command_history += f\"History: [Time: {timestamp}\\nInput: {user_input}\\nOutput: {result.out} Error: {result.err}]\\n\"\n # self.display_output(f\"Output:\\n{result.out}\\nError:\\n{result.err}\")\n self.display_output(translated_output)\n else:\n # Continue with AI translation for the command\n try:\n translated_command = await self.translate_to_command(user_input)\n except Exception as e:\n translated_command = {\n \"err\" : \"Invalid user_input!\",\n \"out\": e\n }\n # if translated_command.err == \"\":\n # translated_output = self.translate_output(translated_command)\n # self.command_history += f\"Command Input: {user_input}\\nCommand Output: {translated_output}\\n\"\n # self.display_output(translated_output)\n #else:\n user_input = redact_json_values(user_input, [\"run_command_list\", \"command_files\"])\n\n self.command_history += f\"History: [Time: {timestamp}\\nInput: {user_input}\\nOutput: {translated_command}]\\n\"\n if not isinstance(translated_command, str):\n translated_command = str(translated_command) # Convert non-string output to string\n translated_output = self.translate_output(translated_command)\n self.display_output(translated_output)" }, { "identifier": "load_settings", "path": "modules/utils.py", "snippet": "def load_settings(filepath):\n try:\n with open(os.path.join(filepath, \"settings.json\"), 'r') as f:\n settings = json.load(f)\n chk_file = os.path.join(filepath, settings['command_prompt'])\n if os.path.isfile(chk_file):\n with open(chk_file, 
'r') as f:\n settings['command_prompt'] = f.read()\n \n chk_file = os.path.join(filepath, settings['display_prompt'])\n if os.path.isfile(chk_file):\n with open(chk_file, 'r') as f:\n settings['display_prompt'] = f.read()\n\n chk_file = os.path.join(filepath, settings['user_command_prompt'])\n if os.path.isfile(chk_file):\n with open(chk_file, 'r') as f:\n settings['user_command_prompt'] = f.read()\n\n chk_file = os.path.join(filepath, settings['python_command_prompt'])\n if os.path.isfile(chk_file):\n with open(chk_file, 'r') as f:\n settings['python_command_prompt'] = f.read()\n\n return settings\n except FileNotFoundError:\n return {}" } ]
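The get_vector_average and search_similar_conversations methods in the VectorDatabase snippet boil down to: average the Word2Vec vectors of a query's in-vocabulary words, then rank stored response vectors by cosine similarity. Below is a stripped-down, standalone sketch of that idea with a toy corpus and no SQLite persistence; the example sentences and names are illustrative, not from the source.

import numpy as np
from gensim.models import Word2Vec
from sklearn.metrics.pairwise import cosine_similarity

# Toy corpus standing in for stored responses; the real class keeps vectors in
# SQLite and incrementally retrains the model on untrained rows.
responses = ["list files in the current folder", "show git status", "delete temp files"]
model = Word2Vec([r.split() for r in responses], min_count=1, vector_size=100)


def vector_average(text, model):
    # Mean of the word vectors for in-vocabulary tokens, else a zero vector.
    vectors = [model.wv[w] for w in text.split() if w in model.wv.key_to_index]
    return np.mean(vectors, axis=0) if vectors else np.zeros(model.vector_size)


stored = np.stack([vector_average(r, model) for r in responses])
query = vector_average("show files in folder", model)

similarities = cosine_similarity([query], stored)[0]
top_ids = similarities.argsort()[::-1][:1]
print([responses[i] for i in top_ids])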
import json
import os
import sys
import asyncio
import json
from modules.vectorDatabase import VectorDatabase
from datetime import datetime
from modules.menus.setup_menu import save_settings, setup_menu
from modules.shellSpeak import ShellSpeak
from modules.utils import load_settings
11,060
# from modules.vectors import load_faiss_index, build_and_save_faiss_index, load_index_data, needs_index_update def run_async_function(func, *args): asyncio.run(func(*args)) async def start_shell_speak(settings, base_path, vector_db): await main_start(settings, base_path, vector_db) async def main_start(settings, base_path, vector_db): # Initialize VectorDatabase here if needed globally shellSpeak = ShellSpeak(settings, base_path, vector_db) await shellSpeak.run() def main(): base_path = os.path.abspath(".")
# from modules.vectors import load_faiss_index, build_and_save_faiss_index, load_index_data, needs_index_update def run_async_function(func, *args): asyncio.run(func(*args)) async def start_shell_speak(settings, base_path, vector_db): await main_start(settings, base_path, vector_db) async def main_start(settings, base_path, vector_db): # Initialize VectorDatabase here if needed globally shellSpeak = ShellSpeak(settings, base_path, vector_db) await shellSpeak.run() def main(): base_path = os.path.abspath(".")
settings = load_settings(base_path)
3
2023-10-31 23:35:19+00:00
16k
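The entry-point code in the record above funnels an async application class through asyncio.run (run_async_function, start_shell_speak, main_start). Below is a minimal self-contained sketch of that pattern; the names DemoApp and start_app are placeholders for illustration, not names from the repository.

import asyncio

class DemoApp:
    # Placeholder for an application object that exposes an async run loop.
    def __init__(self, name: str):
        self.name = name

    async def run(self) -> None:
        await asyncio.sleep(0)  # stands in for real asynchronous work
        print(f"{self.name} finished")

async def start_app(name: str) -> None:
    # Async wrapper that builds the app and awaits its run loop,
    # mirroring start_shell_speak / main_start above.
    app = DemoApp(name)
    await app.run()

def main() -> None:
    # Synchronous entry point that drives the event loop once.
    asyncio.run(start_app("demo"))

if __name__ == "__main__":
    main()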
qym7/SparseDiff
sparse_diffusion/diffusion_model_sparse.py
[ { "identifier": "utils", "path": "sparse_diffusion/utils.py", "snippet": "def setup_wandb(cfg):\ndef create_folders(args):\ndef to_dense(x, edge_index, edge_attr, batch, charge):\ndef to_dense_node(x, batch):\ndef to_dense_edge(edge_index, edge_attr, batch, max_num_nodes):\ndef encode_no_edge(E):\ndef to_sparse(X, E, y, node_mask, charge=None):\n def __init__(self, X, E, y, charge=None, t_int=None, t=None, node_mask=None):\n def device_as(self, x: torch.Tensor):\n def type_as(self, x: torch.Tensor):\n def mask(self, node_mask=None, collapse=False):\n def collapse(self, collapse_charge=None):\n def __repr__(self):\n def copy(self):\n def __init__(\n self, node, edge_index, edge_attr, y, ptr=None, batch=None, charge=None\n ):\n def type_as(self, x: torch.Tensor):\n def to_device(self, device: str):\n def coalesce(self):\n def symmetry(self):\n def collapse(self, collapse_charge=None):\n def __init__(self, keep_chain):\n def append(self, data):\ndef delete_repeated_twice_edges(edge_index, edge_attr): \ndef to_undirected(edge_index, edge_attr=None):\ndef undirected_to_directed(edge_index, edge_attr=None):\ndef ptr_to_node_mask(ptr, batch, n_node):\ndef concat_sparse_graphs(graphs):\ndef split_samples(samples, start_idx, end_idx):\ndef densify_noisy_data(sparse_noisy_data):\n E = to_dense_edge(edge_index, edge_attr, batch, max_num_nodes)\n E = to_dense_adj(\n edge_index=edge_index,\n batch=batch,\n edge_attr=edge_attr,\n max_num_nodes=max_num_nodes,\n )\n E = encode_no_edge(E)\n E[:, :, :, 0] = first_elt\nclass PlaceHolder:\nclass SparsePlaceHolder:\nclass SparseChainPlaceHolder:" }, { "identifier": "diffusion_utils", "path": "sparse_diffusion/diffusion/diffusion_utils.py", "snippet": "def sum_except_batch(x):\ndef assert_correctly_masked(variable, node_mask):\ndef sample_gaussian(size):\ndef sample_gaussian_with_mask(size, node_mask):\ndef clip_noise_schedule(alphas2, clip_value=0.001):\ndef cosine_beta_schedule(timesteps, s=0.008, raise_to_power: float = 1):\ndef cosine_beta_schedule_discrete(timesteps, s=0.008):\ndef custom_beta_schedule_discrete(timesteps, average_num_nodes=50, s=0.008):\ndef gaussian_KL(q_mu, q_sigma):\ndef cdf_std_gaussian(x):\ndef SNR(gamma):\ndef inflate_batch_array(array, target_shape):\ndef sigma(gamma, target_shape):\ndef alpha(gamma, target_shape):\ndef check_mask_correct(variables, node_mask):\ndef check_tensor_same_size(*args):\ndef sigma_and_alpha_t_given_s(\n gamma_t: torch.Tensor, gamma_s: torch.Tensor, target_size: torch.Size\n):\ndef reverse_tensor(x):\ndef sample_discrete_features(probX, probE, node_mask, prob_charge=None):\ndef sample_discrete_edge_features(probE, node_mask):\ndef sample_discrete_node_features(probX, node_mask):\ndef compute_posterior_distribution(M, M_t, Qt_M, Qsb_M, Qtb_M):\ndef compute_sparse_posterior_distribution(M, M_t, Qt_M, Qsb_M, Qtb_M):\ndef compute_batched_over0_posterior_distribution(X_t, Qt, Qsb, Qtb):\ndef mask_distributions(\n true_X, true_E, pred_X, pred_E, node_mask, true_charge=None, pred_charge=None\n):\ndef posterior_distributions(X, E, X_t, E_t, y_t, Qt, Qsb, Qtb, charge, charge_t):\ndef sample_discrete_feature_noise(limit_dist, node_mask):\ndef sample_sparse_discrete_feature_noise(limit_dist, node_mask):\ndef compute_sparse_batched_over0_posterior_distribution(\n input_data, batch, Qt, Qsb, Qtb\n):\n M = M.flatten(start_dim=1, end_dim=-2).to(\n torch.float32\n ) # (bs, N, d) with N = n or n * n\n U_X = x_limit.flatten(end_dim=-2).multinomial(1).reshape(bs, n_max)\n U_E = 
e_limit.flatten(end_dim=-2).multinomial(1).reshape(bs, n_max, n_max)\n U_X = U_X.type_as(long_mask)\n U_E = U_E.type_as(long_mask)\n U_X = F.one_hot(U_X, num_classes=x_limit.shape[-1]).float()\n U_E = F.one_hot(U_E, num_classes=e_limit.shape[-1]).float()\n U_E = U_E * upper_triangular_mask\n U_E = U_E + torch.transpose(U_E, 1, 2)" }, { "identifier": "get_computational_graph", "path": "sparse_diffusion/diffusion/sample_edges_utils.py", "snippet": "def get_computational_graph(\n triu_query_edge_index,\n clean_edge_index,\n clean_edge_attr,\n triu=True,\n):\n \"\"\"\n concat and remove repeated edges of query_edge_index and clean_edge_index\n mask the position of query_edge_index\n in case where query_edge_attr is None, return query_edge_attr as 0\n else, return query_edge_attr for all query_edge_index\n (used in apply noise, when we need to sample the query edge attr)\n \"\"\"\n # get dimension information\n de = clean_edge_attr.shape[-1]\n device = triu_query_edge_index.device\n\n # create default query edge attr\n default_query_edge_attr = torch.zeros((triu_query_edge_index.shape[1], de)).to(\n device\n )\n default_query_edge_attr[:, 0] = 1\n\n # if query_edge_attr is None, use default query edge attr\n if triu:\n # make random edges symmetrical\n query_edge_index, default_query_edge_attr = utils.to_undirected(\n triu_query_edge_index, default_query_edge_attr\n )\n _, default_query_edge_attr = utils.to_undirected(\n triu_query_edge_index, default_query_edge_attr\n )\n else:\n query_edge_index, default_query_edge_attr = triu_query_edge_index, default_query_edge_attr\n\n # get the computational graph: positive edges + random edges\n comp_edge_index = torch.hstack([clean_edge_index, query_edge_index])\n default_comp_edge_attr = torch.argmax(\n torch.vstack([clean_edge_attr, default_query_edge_attr]), -1\n )\n\n # reduce repeated edges and get the mask\n assert comp_edge_index.dtype == torch.long\n _, min_default_edge_attr = coalesce(\n comp_edge_index, default_comp_edge_attr, reduce=\"min\"\n )\n\n max_comp_edge_index, max_default_edge_attr = coalesce(\n comp_edge_index, default_comp_edge_attr, reduce=\"max\"\n )\n query_mask = min_default_edge_attr == 0\n comp_edge_attr = F.one_hot(max_default_edge_attr.long(), num_classes=de).float()\n\n return query_mask, max_comp_edge_index, comp_edge_attr" }, { "identifier": "mask_query_graph_from_comp_graph", "path": "sparse_diffusion/diffusion/sample_edges_utils.py", "snippet": "def mask_query_graph_from_comp_graph(\n triu_query_edge_index, edge_index, edge_attr, num_classes\n):\n query_edge_index = utils.to_undirected(triu_query_edge_index)\n # import pdb; pdb.set_trace()\n\n all_edge_index = torch.hstack([edge_index, query_edge_index])\n all_edge_attr = torch.hstack(\n [\n torch.argmax(edge_attr, -1),\n torch.zeros(query_edge_index.shape[1]).to(edge_index.device),\n ]\n )\n\n assert all_edge_index.dtype == torch.long\n _, min_edge_attr = coalesce(all_edge_index, all_edge_attr, reduce=\"min\")\n\n max_edge_index, max_edge_attr = coalesce(\n all_edge_index, all_edge_attr, reduce=\"max\"\n )\n\n return (\n min_edge_attr == 0,\n F.one_hot(max_edge_attr.long(), num_classes=num_classes),\n max_edge_index,\n )" }, { "identifier": "sample_non_existing_edge_attr", "path": "sparse_diffusion/diffusion/sample_edges_utils.py", "snippet": "def sample_non_existing_edge_attr(query_edges_dist_batch, num_edges_to_sample):\n device = query_edges_dist_batch.device\n max_edges_to_sample = int(num_edges_to_sample.max())\n\n if max_edges_to_sample == 0:\n return 
torch.tensor([]).to(device)\n\n query_mask = (\n torch.ones((len(num_edges_to_sample), max_edges_to_sample))\n .cumsum(-1)\n .to(device)\n )\n query_mask[\n query_mask > num_edges_to_sample.unsqueeze(-1).repeat(1, max_edges_to_sample)\n ] = 0\n query_mask[query_mask > 0] = 1\n query_edge_attr = (\n torch.multinomial(query_edges_dist_batch, max_edges_to_sample, replacement=True)\n + 1\n )\n query_edge_attr = query_edge_attr.flatten()[query_mask.flatten().bool()]\n\n return query_edge_attr" }, { "identifier": "condensed_to_matrix_index_batch", "path": "sparse_diffusion/diffusion/sample_edges_utils.py", "snippet": "def condensed_to_matrix_index_batch(condensed_index, num_nodes, edge_batch, ptr):\n \"\"\"From https://stackoverflow.com/questions/5323818/condensed-matrix-function-to-find-pairs.\n condensed_index: (E) example: [0, 1, 0, 2] where [0, 1] are edges for graph0 and [0,2] edges for graph 1\n num_nodes: (bs)\n edge_batch: (E): tells to which graph each edge belongs\n ptr: (bs+1): contains the offset for the number of nodes in each graph.\n \"\"\"\n bb = -2 * num_nodes[edge_batch] + 1\n\n # Edge ptr adds an offset of n (n-1) / 2 to each edge index\n ptr_condensed_index = condensed_index\n ii = torch.div(\n (-bb - torch.sqrt(bb**2 - 8 * ptr_condensed_index)), 2, rounding_mode=\"floor\"\n )\n jj = (\n ptr_condensed_index\n + torch.div(ii * (bb + ii + 2), 2, rounding_mode=\"floor\")\n + 1\n )\n return torch.vstack((ii.long(), jj.long())) + ptr[edge_batch]" }, { "identifier": "sample_query_edges", "path": "sparse_diffusion/diffusion/sample_edges.py", "snippet": "def sample_query_edges(\n num_nodes_per_graph: Tensor, edge_proportion=None, num_edges_to_sample=None\n):\n \"\"\"Sample edge_proportion % of edges in each graph\n num_nodes_per_graph: (bs): tensor of int.\n Return: edge_index, batch\n \"\"\"\n assert num_nodes_per_graph.dtype == torch.long\n # num_nodes could be 1 in QM9\n assert torch.all(num_nodes_per_graph >= 1), num_nodes_per_graph\n\n batch_size = len(num_nodes_per_graph)\n device = num_nodes_per_graph.device\n\n n = num_nodes_per_graph\n max_condensed_value = (n * (n - 1) / 2).long()\n if num_edges_to_sample is None and edge_proportion is not None:\n assert 0 < edge_proportion <= 1, edge_proportion\n num_edges_to_sample = torch.ceil(edge_proportion * max_condensed_value).long()\n elif num_edges_to_sample is not None:\n assert num_edges_to_sample.dtype == torch.long\n else:\n raise ValueError(\n \"Either edge_proportion or num_edges_to_sample should be provided\"\n )\n\n condensed_index, edge_batch = sampled_condensed_indices_uniformly(\n max_condensed_value, num_edges_to_sample\n )\n\n if batch_size == 1:\n edge_index = condensed_to_matrix_index(condensed_index, num_nodes=n[0])\n return edge_index, torch.zeros(n, dtype=torch.long, device=device)\n\n if len(torch.unique(num_nodes_per_graph)) == 1:\n # Case of several graphs of the same size\n # Add the offset to the edge_index\n offset = torch.cumsum(num_nodes_per_graph, dim=0)[:-1] # (bs - 1)\n offset = torch.cat(\n (torch.zeros(1, device=device, dtype=torch.long), offset)\n ) # (bs)\n\n edge_index = condensed_to_matrix_index_batch(\n condensed_index,\n num_nodes=num_nodes_per_graph,\n edge_batch=edge_batch,\n ptr=offset,\n )\n return edge_index, torch.arange(batch_size, device=device).repeat_interleave(n)\n\n # Most general case: graphs of varying sizes\n # condensed_index = randperm_expanded[complete_mask] # (sum(num_edges_per_graph))\n offset = torch.cumsum(num_nodes_per_graph, dim=0)[:-1] # (bs - 1)\n offset = torch.cat(\n 
(torch.zeros(1, device=device, dtype=torch.long), offset)\n ) # (bs)\n edge_index = condensed_to_matrix_index_batch(\n condensed_index,\n num_nodes=num_nodes_per_graph,\n edge_batch=edge_batch,\n ptr=offset,\n )\n # Get the batch information\n batch = torch.arange(batch_size, device=device).repeat_interleave(\n num_nodes_per_graph\n )\n return edge_index, batch" }, { "identifier": "sample_non_existing_edges_batched", "path": "sparse_diffusion/diffusion/sample_edges.py", "snippet": "def sample_non_existing_edges_batched(\n num_edges_to_sample, existing_edge_index, num_nodes, batch\n):\n \"\"\"Sample non-existing edges from a complete graph.\n num_edges_to_sample: (bs) long\n existing_edge_index: (2, E)\n num_nodes: (bs) long\n batch: (N) long\n existing_edge_index only contains edges that exist in the top part of triangle matrix\n \"\"\"\n device = existing_edge_index.device\n unit_graph_mask = num_nodes == 1\n unit_graph_mask_offset = torch.cat(\n (torch.zeros(1, device=device, dtype=torch.bool), unit_graph_mask[:-1])\n )\n\n # Compute the number of existing and non-existing edges.\n num_edges_total = (num_nodes * (num_nodes - 1) / 2).long()\n # Count existing edges using global pooling. In case a graph has no edge, global_add_pool\n # May return something of the wrong length. To avoid this, add a 0 for each graph\n # TODO: check if it can be simplified using the size argument of global add pool\n # full_edge_count = torch.hstack((torch.ones(existing_edge_index.shape[1], device=device),\n # torch.zeros(batch.max()+1, device=device))) # (ne+bs)\n # full_edge_batch = torch.hstack((batch[existing_edge_index[0]],\n # torch.arange(batch.max()+1, device=device))) # (ne+bs)\n # num_edges_existing = pool.global_add_pool(x=full_edge_count, batch=full_edge_batch).long()\n num_edges_existing = pool.global_add_pool(\n x=torch.ones(existing_edge_index.shape[1], device=device),\n batch=batch[existing_edge_index[0]],\n size=len(num_edges_to_sample),\n ).long()\n num_non_existing_edges = num_edges_total - num_edges_existing\n assert (num_edges_to_sample <= num_non_existing_edges).all(), (\n num_edges_to_sample,\n num_non_existing_edges,\n )\n\n # Sample non-existing edge indices without considering existing edges.\n # print(\"Num edges non existing\", num_non_existing_edges)\n # multinomial and not randint because we want to sample without replacement\n sampled_indices, sampled_edge_batch = sampled_condensed_indices_uniformly(\n max_condensed_value=num_non_existing_edges,\n num_edges_to_sample=num_edges_to_sample,\n )\n\n # Compute the offset (bs, ) for each graph, where offset -> nbr of nodes, sq_offset -> nbr of edges\n # Go from a matrix problem to a 1d problem, it is easier\n existing_edge_batch = batch[existing_edge_index[0]]\n num_edges_total = (num_nodes * (num_nodes - 1) / 2).long()\n sq_offset = torch.cumsum(num_edges_total, dim=0)[:-1] # (bs - 1)\n # Prepend a 0\n sq_offset = torch.cat(\n (torch.zeros(1, device=device, dtype=torch.long), sq_offset)\n ) # (bs)\n\n offset = torch.cumsum(num_nodes, dim=0)[\n :-1\n ] # (bs - 1) # (bs - 1)\n offset = torch.cat(\n (torch.zeros(1, device=device, dtype=torch.long), offset)\n ) # (bs)\n # existing_indices (E, ) is of form [0 1 2 3 4 0 2 3 4]\n rescaled_edge_index = (\n existing_edge_index - offset[existing_edge_batch]\n ) # of form [0 1 2 3 4 0 2 3 4]\n existing_indices = matrix_to_condensed_index_batch(\n rescaled_edge_index, num_nodes=num_nodes, edge_batch=existing_edge_batch\n )\n\n # Add offset to the sampled indices\n # Example of sampled condensed: 
[0 3 1 0 2]\n epsilon = 0.1\n sampled_indices_offset = sq_offset[sampled_edge_batch] # (E_sample, )\n # print(\"sampled indices\", sampled_indices)\n # print(\"sampled edge batch\", sampled_edge_batch)\n samp_ind_w_offset = sampled_indices + sampled_indices_offset\n samp_ind_w_offset = torch.sort(samp_ind_w_offset)[\n 0\n ] # E.g. [0 1 3 6 8], where [0 1 3] belong to a graph of 4 nodes, [6 8] to a graph of 3 nodes\n # print(\"Sampled indices with offset\", samp_ind_w_offset)\n # add small value to create an order later in the sort\n samp_ind_w_offset = samp_ind_w_offset + epsilon\n\n # Add virtual edges to the existing edges to mark the beginning of each graph, for batch processing\n # After adding epsilon, sqrt_ptr is smaller than all edges of the next graph, and bigger than all edges of the current graph\n # * when there exists graphs with size 1, there might be identical values in sq_offset, also in virtual nodes\n existing_ind_w_offset = existing_indices + sq_offset[existing_edge_batch]\n virtual_nodes = (\n sq_offset - epsilon\n ) # Introduce virtual nodes that will be used later to split graphs\n # add different offset for graphs of size 1 to separate them and their following graphs\n virtual_nodes[unit_graph_mask] = virtual_nodes[unit_graph_mask] - 0.1\n existing_ind_w_offset = torch.cat((existing_ind_w_offset, virtual_nodes))\n existing_ind_w_offset, existing_condensed_offset_argsort = torch.sort(\n existing_ind_w_offset\n )\n # print(\"Existing condensed indices with offset\", existing_ind_w_offset)\n virtual_existing_mask = torch.cat(\n (\n torch.zeros(len(existing_indices), dtype=torch.long, device=device),\n torch.ones(len(sq_offset), dtype=torch.long, device=device),\n )\n )\n virtual_existing_mask = virtual_existing_mask[\n existing_condensed_offset_argsort\n ] # [1 0 0 0 1 0 0]\n # print('Virtual nodes mask', virtual_existing_mask)\n\n # Compute the mask of free edges\n # When there exists graphs with size 1, free spots might be negative, which means that\n # existing condensed indices have same neighbor value\n free_spots = (\n torch.diff(existing_ind_w_offset, prepend=torch.tensor([-1]).to(device)) - 1\n ) # [-0.1, 0, 2, 9, 9.9, 18, 25]\n free_spots = torch.ceil(free_spots).long() # [0, 0, 1, 6, 0, 8, 6]\n # print(\"Free spots\", free_spots)\n # Map these values to index\n cumsum = torch.cumsum(free_spots, dim=0).long() # [1 2 3 4 5 6 7]\n cumsum_batch = (\n torch.cumsum(virtual_existing_mask, dim=0).long() - 1\n ) # [1 1 1 1 2 2 2] - 1\n # delete the offset of free spots to cumsum\n cumsum_offset = cumsum[virtual_existing_mask.bool()][cumsum_batch]\n # print(\"Cumsum offset\", cumsum_offset)\n # print(\"Cumsum before removing offset\", cumsum)\n cumsum = cumsum - cumsum_offset # [0 2 5 0 2 5]\n # add the offset of edge number to cumsum\n cumsum = cumsum + sq_offset[cumsum_batch] # [0 2 5 6 8 11]\n # print(\"Cumsum\", cumsum)\n # Cumsum now contains the number of free spots at the left -- it is computed separetely for each graph\n # An offset is added on the result\n\n # Add virtual edges to the sampled edges to mark the end of each graph\n num_sampled_edges = len(sampled_indices)\n num_virtual_nodes = len(sq_offset)\n num_free_spots_indices = len(cumsum)\n\n # Group the different vectors together: the existing edges, the virtual nodes and the free spots\n grouped = torch.cat((samp_ind_w_offset, virtual_nodes, cumsum))\n # print(\"grouped\", grouped)\n sorted, argsort = torch.sort(grouped)\n # print(\"sorted\", sorted)\n # Create the masks corresponding to these 3 types of 
objects\n num_total = num_sampled_edges + num_virtual_nodes + num_free_spots_indices\n # mask is created for virtual nodes, in order to reduce the offset for cumsum\n virtual_sampled_mask = torch.zeros(num_total, dtype=torch.bool, device=device)\n virtual_sampled_mask[\n num_sampled_edges : num_sampled_edges + num_virtual_nodes\n ] = True\n virtual_sampled_mask = virtual_sampled_mask[argsort]\n\n free_spots_ind_mask = torch.zeros(num_total, dtype=torch.bool, device=device)\n free_spots_ind_mask[-num_free_spots_indices:] = True\n free_spots_ind_mask = free_spots_ind_mask[argsort]\n\n sampled_ind_mask = torch.zeros(num_total, dtype=torch.bool, device=device)\n sampled_ind_mask[:num_sampled_edges] = True\n sampled_ind_mask = sampled_ind_mask[argsort]\n\n # to_shift tells by how much to shift sampled and virtual edges\n to_shift = torch.cumsum(free_spots_ind_mask, dim=0) # - sampled_edge_batch\n # print(\"to_shift\", to_shift)\n new_indices = sorted + to_shift\n # remove epsilon added to sampled edges\n new_indices = new_indices[sampled_ind_mask] - epsilon\n # remove cumsum_offset to unify the indices of different graphs from cumsum_mask\n # 1 is added to compensate the fact that cumsum is computed with virtual nodes\n cumsum_offset = to_shift[virtual_sampled_mask.bool()][sampled_edge_batch] + 1\n cumsum_offset[unit_graph_mask_offset[sampled_edge_batch]] = (\n cumsum_offset[unit_graph_mask_offset[sampled_edge_batch]] + 1\n )\n # print(\"Cumsum offset\", cumsum_offset)\n # remove sq_offset contained by sorted\n new_indices = new_indices - cumsum_offset - sq_offset[sampled_edge_batch]\n # print(\"New indices long\", new_indices)\n new_indices = new_indices.round()\n # print('Existing edge indices', existing_indices)\n # Convert to matrix index.\n new_edge_index = condensed_to_matrix_index_batch(\n condensed_index=new_indices,\n num_nodes=num_nodes,\n edge_batch=sampled_edge_batch,\n ptr=offset,\n )\n\n # # debugging\n # # check if there are repeated edges\n # print('smallest graph size is {}'.format(num_nodes.min()))\n # existing_ind_w_offset = existing_indices + sq_offset[existing_edge_batch]\n # samp_ind_w_offset = new_indices + sq_offset[sampled_edge_batch]\n # repeated = existing_ind_w_offset.round().unsqueeze(1) == samp_ind_w_offset.round().unsqueeze(0)\n # repeated_ind = torch.where(repeated)\n # if repeated.sum()>0:\n # print('repeated edges')\n # import pdb; pdb.set_trace()\n # cur_shift = to_shift[sampled_ind_mask][1188] - cumsum_offset[1188]\n\n return new_edge_index" }, { "identifier": "sampled_condensed_indices_uniformly", "path": "sparse_diffusion/diffusion/sample_edges.py", "snippet": "def sampled_condensed_indices_uniformly(\n max_condensed_value, num_edges_to_sample, return_mask=False\n):\n \"\"\"Max_condensed value: (bs) long tensor\n num_edges_to_sample: (bs) long tensor\n Return: condensed_index e.g. 
[0 1 3 0 2]\n \"\"\"\n assert (0 <= num_edges_to_sample).all(), (\n num_edges_to_sample <= max_condensed_value\n ).all()\n batch_size = max_condensed_value.shape[0]\n device = max_condensed_value.device\n\n if (\n len(torch.unique(max_condensed_value)) == 1\n and len(torch.unique(num_edges_to_sample)) == 1\n ):\n max_val = max_condensed_value[0]\n to_sample = num_edges_to_sample[0]\n sampled_condensed = torch.multinomial(\n torch.ones(max_val, device=device), num_samples=to_sample, replacement=False\n )\n edge_batch = torch.zeros(\n num_edges_to_sample[0], device=device, dtype=torch.long\n )\n if batch_size == 1:\n if return_mask:\n condensed_mask = torch.arange(num_edges_to_sample[0], device=device)\n return sampled_condensed, edge_batch, condensed_mask\n\n return sampled_condensed, edge_batch\n\n # Case of several graphs of the same size\n # Repeat the edge_index for each graph and aggregate them\n sampled_condensed_repeated = (\n sampled_condensed.unsqueeze(0).expand(batch_size, -1).flatten()\n )\n edge_batch = torch.arange(batch_size, device=device).repeat_interleave(\n to_sample\n )\n\n if return_mask:\n condensed_mask = torch.arange(num_edges_to_sample[0], device=device)\n condensed_mask = (\n condensed_mask.unsqueeze(0).expand(batch_size, -1).flatten()\n )\n return sampled_condensed_repeated, edge_batch, condensed_mask\n\n return sampled_condensed_repeated, edge_batch\n\n # Most general case: graphs of varying sizes\n max_size = torch.max(max_condensed_value)\n # import pdb; pdb.set_trace()\n if max_size > 10**7:\n print(\"[Warning]: sampling random edges might bew slow\")\n\n randperm_full = torch.randperm(max_size, device=device) # (max_condensed)\n randperm_expanded = randperm_full.unsqueeze(0).expand(\n batch_size, -1\n ) # (bs, max_condensed)\n\n # General goal: keep the indices on the left that are not too big for each graph\n # Mask1 is used to mask the indices that are too large for current graph\n mask1 = randperm_expanded < max_condensed_value.unsqueeze(1) # (bs, max_condensed)\n\n # Cumsum(mask1) is the number of valid indices on the left of each index\n # Mask2 will select the right number of indices on the left\n mask2 = torch.cumsum(mask1, dim=1) <= num_edges_to_sample.unsqueeze(\n 1\n ) # (bs, max_condensed)\n complete_mask = mask1 * mask2\n condensed_index = randperm_expanded[complete_mask] # (sum(num_edges_per_graph))\n edge_batch = (\n torch.arange(batch_size, device=device)\n .unsqueeze(1)\n .expand(-1, max_size)[complete_mask]\n )\n\n if return_mask:\n complete_mask = complete_mask.cumsum(-1)[complete_mask] - 1\n return condensed_index, edge_batch, complete_mask\n\n return condensed_index, edge_batch" }, { "identifier": "SignNetNodeEncoder", "path": "sparse_diffusion/models/sign_pos_encoder.py", "snippet": "class SignNetNodeEncoder(torch.nn.Module):\n \"\"\"SignNet Positional Embedding node encoder.\n https://arxiv.org/abs/2202.13013\n https://github.com/cptq/SignNet-BasisNet\n Uses precomputated Laplacian eigen-decomposition, but instead\n of eigen-vector sign flipping + DeepSet/Transformer, computes the PE as:\n SignNetPE(v_1, ... 
, v_k) = \\rho ( [\\phi(v_i) + \\rhi(−v_i)]^k_i=1 )\n where \\phi is GIN network applied to k first non-trivial eigenvectors, and\n \\rho is an MLP if k is a constant, but if all eigenvectors are used then\n \\rho is DeepSet with sum-pooling.\n SignNetPE of size dim_pe will get appended to each node feature vector.\n If `expand_x` set True, original node features will be first linearly\n projected to (dim_emb - dim_pe) size and the concatenated with SignNetPE.\n Args:\n dim_emb: Size of final node embedding\n expand_x: Expand node features `x` from dim_in to (dim_emb - dim_pe)\n \"\"\"\n\n def __init__(self, dataset_infos, sn_hidden_dim, k_node, expand_x=True):\n \"\"\"\n Initialize the model with the default parameters.\n \"\"\"\n super().__init__()\n self.dataset_infos = dataset_infos\n self.k_node = k_node\n dim_in = (\n dataset_infos.input_dims.X + dataset_infos.input_dims.charge - self.k_node\n ) # Expected original input node features dim\n dim_emb = sn_hidden_dim\n\n dim_pe = 16 # Size of PE embedding\n model_type = \"DeepSet\" # Encoder NN model type for SignNet\n\n if model_type not in [\"MLP\", \"DeepSet\"]:\n raise ValueError(f\"Unexpected SignNet model {model_type}\")\n self.model_type = model_type\n sign_inv_layers = 3 # Num. layers in \\phi GNN part\n rho_layers = 1 # Num. layers in \\rho MLP/DeepSet\n\n if rho_layers < 1:\n raise ValueError(f\"Num layers in rho model has to be positive.\")\n\n max_freqs = 10 # Num. eigenvectors (frequencies)\n self.pass_as_var = False # Pass PE also as a separate variable\n\n if dim_emb - dim_pe < 1:\n raise ValueError(\n f\"SignNet PE size {dim_pe} is too large for \"\n f\"desired embedding size of {dim_emb}.\"\n )\n\n if expand_x:\n self.linear_x = nn.Linear(dim_in, dim_emb - dim_pe)\n self.expand_x = expand_x\n\n # Sign invariant neural network.\n if self.model_type == \"MLP\":\n self.sign_inv_net = GINDeepSigns(\n in_channels=1,\n hidden_channels=64,\n out_channels=4,\n num_layers=sign_inv_layers,\n k=max_freqs,\n dim_pe=dim_pe,\n rho_num_layers=rho_layers,\n use_bn=True,\n dropout=0.0,\n activation=\"relu\",\n )\n elif self.model_type == \"DeepSet\":\n self.sign_inv_net = MaskedGINDeepSigns(\n in_channels=1,\n hidden_channels=64,\n out_channels=4,\n num_layers=sign_inv_layers,\n dim_pe=dim_pe,\n rho_num_layers=rho_layers,\n use_bn=True,\n dropout=0.0,\n activation=\"relu\",\n )\n else:\n raise ValueError(f\"Unexpected model {self.model_type}\")\n\n def forward(self, x, edge_index, batch):\n eigvecs = x[:, -self.k_node:]\n x = x[:, : -self.k_node]\n\n pos_enc = eigvecs.unsqueeze(-1) # (Num nodes) x (Num Eigenvectors) x 1\n\n empty_mask = torch.isnan(pos_enc)\n pos_enc[empty_mask] = 0 # (Num nodes) x (Num Eigenvectors) x 1\n\n # SignNet\n pos_enc = self.sign_inv_net(\n pos_enc, edge_index, batch\n ) # (Num nodes) x (pos_enc_dim)\n\n # Expand node features if needed\n if self.expand_x:\n h = self.linear_x(x)\n else:\n h = x\n\n # Concatenate final PEs to input embedding\n x = torch.cat((h, pos_enc), 1)\n # Keep PE also separate in a variable (e.g. for skip connections to input)\n\n return x" } ]
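As a quick sanity check of the index arithmetic used in condensed_to_matrix_index_batch above (the quadratic-formula inversion with bb = -2*n + 1), here is a small standalone sketch for a single graph, assuming the standard condensed ordering (0,1), (0,2), ..., (n-2,n-1); condensed_to_pair is an illustrative helper, not a function from the repository.

import math
from itertools import combinations

def condensed_to_pair(k: int, n: int) -> tuple:
    # Invert a condensed (strict upper-triangular) edge index into (i, j),
    # using the same formula as the snippet: bb = -2n + 1.
    bb = -2 * n + 1
    i = math.floor((-bb - math.sqrt(bb * bb - 8 * k)) / 2)
    j = k + (i * (bb + i + 2)) // 2 + 1
    return i, j

n = 5
pairs = list(combinations(range(n), 2))  # (0,1), (0,2), ..., (3,4)
assert all(condensed_to_pair(k, n) == p for k, p in enumerate(pairs))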
import time import os import math import pickle import json import torch import wandb import numpy as np import torch.nn as nn import torch.nn.functional as F import pytorch_lightning as pl from tqdm import tqdm from models.conv_transformer_model import GraphTransformerConv from diffusion.noise_schedule import ( PredefinedNoiseScheduleDiscrete, MarginalUniformTransition, ) from metrics.train_metrics import TrainLossDiscrete from metrics.abstract_metrics import SumExceptBatchMetric, SumExceptBatchKL, NLL from analysis.visualization import Visualizer from sparse_diffusion import utils from sparse_diffusion.diffusion import diffusion_utils from sparse_diffusion.diffusion.sample_edges_utils import ( get_computational_graph, mask_query_graph_from_comp_graph, sample_non_existing_edge_attr, condensed_to_matrix_index_batch, ) from sparse_diffusion.diffusion.sample_edges import ( sample_query_edges, sample_non_existing_edges_batched, sampled_condensed_indices_uniformly, ) from sparse_diffusion.models.sign_pos_encoder import SignNetNodeEncoder
11,076
pred_node, pred_edge, p_s_and_t_given_0_X, p_s_and_t_given_0_E, pred_charge, p_s_and_t_given_0_charge, ): sampled_node = self.sample_sparse_node(pred_node, p_s_and_t_given_0_X).long() sampled_edge = self.sample_sparse_edge(pred_edge, p_s_and_t_given_0_E).long() if pred_charge.size(-1) > 0: sampled_charge = self.sample_sparse_node( pred_charge, p_s_and_t_given_0_charge ).long() else: sampled_charge = pred_charge return sampled_node, sampled_edge, sampled_charge def sample_p_zs_given_zt(self, s_float, t_float, data): """ Samples from zs ~ p(zs | zt). Only used during sampling. if last_step, return the graph prediction as well """ node = data.node edge_index = data.edge_index edge_attr = data.edge_attr y = data.y charge = data.charge ptr = data.ptr batch = data.batch beta_t = self.noise_schedule(t_normalized=t_float) # (bs, 1) alpha_s_bar = self.noise_schedule.get_alpha_bar(t_normalized=s_float) alpha_t_bar = self.noise_schedule.get_alpha_bar(t_normalized=t_float) # Retrieve transitions matrix Qtb = self.transition_model.get_Qt_bar(alpha_t_bar, self.device) Qsb = self.transition_model.get_Qt_bar(alpha_s_bar, self.device) Qt = self.transition_model.get_Qt(beta_t, self.device) # Prior distribution # (N, dx, dx) p_s_and_t_given_0_X = ( diffusion_utils.compute_sparse_batched_over0_posterior_distribution( input_data=node, batch=batch, Qt=Qt.X, Qsb=Qsb.X, Qtb=Qtb.X ) ) p_s_and_t_given_0_charge = None if self.use_charge: p_s_and_t_given_0_charge = ( diffusion_utils.compute_sparse_batched_over0_posterior_distribution( input_data=charge, batch=batch, Qt=Qt.charge, Qsb=Qsb.charge, Qtb=Qtb.charge, ) ) # prepare sparse information num_nodes = ptr.diff().long() num_edges = (num_nodes * (num_nodes - 1) / 2).long() # If we had one graph, we will iterate on all edges for each step # we also make sure that the non existing edge number remains the same with the training process ( all_condensed_index, all_edge_batch, all_edge_mask, ) = sampled_condensed_indices_uniformly( max_condensed_value=num_edges, num_edges_to_sample=num_edges, return_mask=True, ) # double checked # number of edges used per loop for each graph num_edges_per_loop = torch.ceil(self.edge_fraction * num_edges) # (bs, ) len_loop = math.ceil(1. / self.edge_fraction) new_edge_index, new_edge_attr, new_charge = ( torch.zeros((2, 0), device=self.device, dtype=torch.long), torch.zeros(0, device=self.device), torch.zeros(0, device=self.device, dtype=torch.long), ) # create the new data for calculation sparse_noisy_data = { "node_t": node, "edge_index_t": edge_index, "edge_attr_t": edge_attr, "batch": batch, "y_t": y, "ptr": ptr, "charge_t": charge, "t_int": (t_float * self.T).int(), "t_float": t_float, } for i in range(len_loop): if self.autoregressive and i != 0: sparse_noisy_data["edge_index_t"] = new_edge_index sparse_noisy_data["edge_attr_t"] = new_edge_attr # the last loop might have less edges, we need to make sure that each loop has the same number of edges if i == len_loop - 1: edges_to_consider_mask = all_edge_mask >= ( num_edges[all_edge_batch] - num_edges_per_loop[all_edge_batch] ) else: # [0, 3, 2, 1, 0, 3, 2, 1, 0, 3, 2, 1] # all_condensed_index is not sorted inside the graph, but it sorted for graph batch edges_to_consider_mask = torch.logical_and( all_edge_mask >= num_edges_per_loop[all_edge_batch] * i, all_edge_mask < num_edges_per_loop[all_edge_batch] * (i + 1), ) # get query edges and pass to matrix index triu_query_edge_index = all_condensed_index[edges_to_consider_mask] query_edge_batch = all_edge_batch[edges_to_consider_mask]
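For orientation, a hedged note on the quantities named p_s_and_t_given_0_* in the cropped code above: in standard discrete-diffusion (D3PM-style) notation they assemble the posterior used to sample z_s from z_t. The formulas below are the generic ones; the exact normalization and batching inside this repository's compute_sparse_batched_over0_posterior_distribution are not restated here.

q(z_s \mid z_t, x) = \frac{(z_t Q_t^{\top}) \odot (x \bar{Q}_s)}{x \, \bar{Q}_t \, z_t^{\top}},
\qquad
p_\theta(z_s \mid z_t) \propto \sum_{x} \hat{p}_\theta(x \mid z_t)\, q(z_s \mid z_t, x)

where Q_t is the one-step transition matrix (Qt), \bar{Q}_s and \bar{Q}_t are the cumulative products (Qsb, Qtb), x ranges over clean one-hot states, and \hat{p}_\theta is the network's prediction (pred_node, pred_edge, pred_charge).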
class DiscreteDenoisingDiffusion(pl.LightningModule): model_dtype = torch.float32 best_val_nll = 1e8 val_counter = 0 start_epoch_time = None val_iterations = None def __init__( self, cfg, dataset_infos, train_metrics, extra_features, domain_features, val_sampling_metrics, test_sampling_metrics, ): super().__init__() self.in_dims = dataset_infos.input_dims self.out_dims = dataset_infos.output_dims self.use_charge = cfg.model.use_charge and self.out_dims.charge > 1 self.node_dist = dataset_infos.nodes_dist self.extra_features = extra_features self.domain_features = domain_features self.sign_net = cfg.model.sign_net if not self.sign_net: cfg.model.sn_hidden_dim = 0 # sparse settings self.edge_fraction = cfg.model.edge_fraction self.autoregressive = cfg.model.autoregressive self.cfg = cfg self.test_variance = cfg.general.test_variance self.dataset_info = dataset_infos self.visualization_tools = Visualizer(dataset_infos) self.name = cfg.general.name self.T = cfg.model.diffusion_steps self.train_loss = TrainLossDiscrete(cfg.model.lambda_train, self.edge_fraction) self.train_metrics = train_metrics self.val_sampling_metrics = val_sampling_metrics self.test_sampling_metrics = test_sampling_metrics # TODO: transform to torchmetrics.MetricCollection self.val_nll = NLL() # self.val_metrics = torchmetrics.MetricCollection([]) self.val_X_kl = SumExceptBatchKL() self.val_E_kl = SumExceptBatchKL() self.val_X_logp = SumExceptBatchMetric() self.val_E_logp = SumExceptBatchMetric() self.best_nll = 1e8 self.best_epoch = 0 # TODO: transform to torchmetrics.MetricCollection self.test_nll = NLL() self.test_X_kl = SumExceptBatchKL() self.test_E_kl = SumExceptBatchKL() self.test_X_logp = SumExceptBatchMetric() self.test_E_logp = SumExceptBatchMetric() if self.use_charge: self.val_charge_kl = SumExceptBatchKL() self.val_charge_logp = SumExceptBatchMetric() self.test_charge_kl = SumExceptBatchKL() self.test_charge_logp = SumExceptBatchMetric() self.model = GraphTransformerConv( n_layers=cfg.model.n_layers, input_dims=self.in_dims, hidden_dims=cfg.model.hidden_dims, output_dims=self.out_dims, sn_hidden_dim=cfg.model.sn_hidden_dim, output_y=cfg.model.output_y, dropout=cfg.model.dropout ) # whether to use sign net if self.sign_net and cfg.model.extra_features == "all": self.sign_net = SignNetNodeEncoder( dataset_infos, cfg.model.sn_hidden_dim, cfg.model.num_eigenvectors ) # whether to use scale layers self.scaling_layer = cfg.model.scaling_layer ( self.node_scaling_layer, self.edge_scaling_layer, self.graph_scaling_layer, ) = self.get_scaling_layers() self.noise_schedule = PredefinedNoiseScheduleDiscrete( cfg.model.diffusion_noise_schedule, timesteps=cfg.model.diffusion_steps ) # Marginal transition node_types = self.dataset_info.node_types.float() x_marginals = node_types / torch.sum(node_types) edge_types = self.dataset_info.edge_types.float() e_marginals = edge_types / torch.sum(edge_types) if not self.use_charge: charge_marginals = node_types.new_zeros(0) else: charge_marginals = ( self.dataset_info.charge_types * node_types[:, None] ).sum(dim=0) print( f"Marginal distribution of the classes: {x_marginals} for nodes, {e_marginals} for edges" ) self.transition_model = MarginalUniformTransition( x_marginals=x_marginals, e_marginals=e_marginals, y_classes=self.out_dims.y, charge_marginals=charge_marginals, ) self.limit_dist = utils.PlaceHolder( X=x_marginals, E=e_marginals, y=torch.ones(self.out_dims.y) / self.out_dims.y, charge=charge_marginals, ) self.save_hyperparameters(ignore=["train_metrics", 
"sampling_metrics"]) self.log_every_steps = cfg.general.log_every_steps self.number_chain_steps = cfg.general.number_chain_steps def training_step(self, data, i): # The above code is using the Python debugger module `pdb` to set a breakpoint at a specific # line of code. When the code is executed, it will pause at that line and allow you to # interactively debug the program. if data.edge_index.numel() == 0: print("Found a batch with no edges. Skipping.") return # Map discrete classes to one hot encoding data = self.dataset_info.to_one_hot(data) start_time = time.time() sparse_noisy_data = self.apply_sparse_noise(data) if hasattr(self, "apply_noise_time"): self.apply_noise_time.append(round(time.time() - start_time, 2)) # Sample the query edges and build the computational graph = union(noisy graph, query edges) start_time = time.time() # print(data.ptr.diff()) triu_query_edge_index, _ = sample_query_edges( num_nodes_per_graph=data.ptr.diff(), edge_proportion=self.edge_fraction ) query_mask, comp_edge_index, comp_edge_attr = get_computational_graph( triu_query_edge_index=triu_query_edge_index, clean_edge_index=sparse_noisy_data["edge_index_t"], clean_edge_attr=sparse_noisy_data["edge_attr_t"], ) # pass sparse comp_graph to dense comp_graph for ease calculation sparse_noisy_data["comp_edge_index_t"] = comp_edge_index sparse_noisy_data["comp_edge_attr_t"] = comp_edge_attr self.sample_query_time.append(round(time.time() - start_time, 2)) sparse_pred = self.forward(sparse_noisy_data) # Compute the loss on the query edges only sparse_pred.edge_attr = sparse_pred.edge_attr[query_mask] sparse_pred.edge_index = comp_edge_index[:, query_mask] # mask true label for query edges # We have the true edge index at time 0, and the query edge index at time t. This function # merge the query edges and edge index at time 0, delete repeated one, and retune the mask # for the true attr of query edges start_time = time.time() ( query_mask2, true_comp_edge_attr, true_comp_edge_index, ) = mask_query_graph_from_comp_graph( triu_query_edge_index=triu_query_edge_index, edge_index=data.edge_index, edge_attr=data.edge_attr, num_classes=self.out_dims.E, ) query_true_edge_attr = true_comp_edge_attr[query_mask2] assert ( true_comp_edge_index[:, query_mask2] - sparse_pred.edge_index == 0 ).all() self.query_count.append(len(query_true_edge_attr)) true_data = utils.SparsePlaceHolder( node=data.x, charge=data.charge, edge_attr=query_true_edge_attr, edge_index=sparse_pred.edge_index, y=data.y, batch=data.batch, ) true_data.collapse() # Map one-hot to discrete class self.coalesce_time.append(round(time.time() - start_time, 2)) # Loss calculation start_time = time.time() loss = self.train_loss.forward( pred=sparse_pred, true_data=true_data, log=i % self.log_every_steps == 0 ) self.train_metrics( pred=sparse_pred, true_data=true_data, log=i % self.log_every_steps == 0 ) self.loss_time.append(round(time.time() - start_time, 2)) return {"loss": loss} def on_fit_start(self) -> None: print( f"Size of the input features:" f" X {self.in_dims.X}, E {self.in_dims.E}, charge {self.in_dims.charge}, y {self.in_dims.y}" ) if self.local_rank == 0: utils.setup_wandb( self.cfg ) # Initialize wandb only on one process to log metrics only once def on_train_epoch_start(self) -> None: self.print("Starting train epoch...") self.start_epoch_time = time.time() self.train_loss.reset() self.train_metrics.reset() self.query_count = [] self.apply_noise_time = [] self.extra_data_time = [] self.forward_time = [] self.sample_query_time = [] self.coalesce_time 
= [] self.loss_time = [] self.cycle_time = [] self.eigen_time = [] def on_train_epoch_end(self) -> None: epoch_loss = self.train_loss.log_epoch_metrics() self.print( f"Epoch {self.current_epoch} finished: X: {epoch_loss['train_epoch/x_CE'] :.2f} -- " f"E: {epoch_loss['train_epoch/E_CE'] :.2f} --" f"charge: {epoch_loss['train_epoch/charge_CE'] :.2f} --" f"y: {epoch_loss['train_epoch/y_CE'] :.2f}" ) self.train_metrics.log_epoch_metrics() if wandb.run: wandb.log({"epoch": self.current_epoch}, commit=False) def on_validation_epoch_start(self) -> None: val_metrics = [self.val_nll, self.val_X_kl, self.val_E_kl, self.val_X_logp, self.val_E_logp, self.val_sampling_metrics] if self.use_charge: val_metrics.extend([self.val_charge_kl, self.val_charge_logp]) for metric in val_metrics: metric.reset() def validation_step(self, data, i): data = self.dataset_info.to_one_hot(data) sparse_noisy_data = self.apply_sparse_noise(data) # Sample the query edges and build the computational graph = union(noisy graph, query edges) triu_query_edge_index, _ = sample_query_edges( num_nodes_per_graph=data.ptr.diff(), edge_proportion=self.edge_fraction ) _, comp_edge_index, comp_edge_attr = get_computational_graph( triu_query_edge_index=triu_query_edge_index, clean_edge_index=sparse_noisy_data["edge_index_t"], clean_edge_attr=sparse_noisy_data["edge_attr_t"] ) # pass sparse comp_graph to dense comp_graph for ease calculation sparse_noisy_data["comp_edge_index_t"] = comp_edge_index sparse_noisy_data["comp_edge_attr_t"] = comp_edge_attr sparse_pred = self.forward(sparse_noisy_data) # to dense dense_pred, node_mask = utils.to_dense( x=sparse_pred.node, edge_index=sparse_pred.edge_index, edge_attr=sparse_pred.edge_attr, batch=sparse_pred.batch, charge=sparse_pred.charge, ) dense_original, _ = utils.to_dense( x=data.x, edge_index=data.edge_index, edge_attr=data.edge_attr, batch=data.batch, charge=data.charge, ) noisy_data = utils.densify_noisy_data(sparse_noisy_data) nll = self.compute_val_loss( dense_pred, noisy_data, dense_original.X, dense_original.E, dense_original.y, node_mask, charge=dense_original.charge, test=False, ) return {"loss": nll} def on_validation_epoch_end(self) -> None: metrics = [ self.val_nll.compute(), self.val_X_kl.compute() * self.T, self.val_E_kl.compute() * self.T, self.val_X_logp.compute(), self.val_E_logp.compute(), ] if self.use_charge: metrics += [ self.val_charge_kl.compute() * self.T, self.val_charge_logp.compute(), ] else: metrics += [-1, -1] if self.val_nll.compute() < self.best_nll: self.best_epoch = self.current_epoch self.best_nll = self.val_nll.compute() metrics += [self.best_epoch, self.best_nll] if wandb.run: wandb.log( { "val/epoch_NLL": metrics[0], "val/X_kl": metrics[1], "val/E_kl": metrics[2], "val/X_logp": metrics[3], "val/E_logp": metrics[4], "val/charge_kl": metrics[5], "val/charge_logp": metrics[6], "val/best_nll_epoch": metrics[7], "val/best_nll": metrics[8], }, commit=False, ) self.print( f"Epoch {self.current_epoch}: Val NLL {metrics[0] :.2f} -- Val Atom type KL {metrics[1] :.2f} -- ", f"Val Edge type KL: {metrics[2] :.2f}", ) # Log val nll with default Lightning logger, so it can be monitored by checkpoint callback val_nll = metrics[0] self.log("val/epoch_NLL", val_nll, sync_dist=True) if val_nll < self.best_val_nll: self.best_val_nll = val_nll self.print( "Val loss: %.4f \t Best val loss: %.4f\n" % (val_nll, self.best_val_nll) ) self.val_counter += 1 print("Starting to sample") if self.val_counter % self.cfg.general.sample_every_val == 0: start = time.time() 
samples_left_to_generate = self.cfg.general.samples_to_generate samples_left_to_save = self.cfg.general.samples_to_save chains_left_to_save = self.cfg.general.chains_to_save # multi gpu operation samples_left_to_generate = math.ceil(samples_left_to_generate / max(self._trainer.num_devices, 1)) self.print( f"Samples to generate: {samples_left_to_generate} for each of the {max(self._trainer.num_devices, 1)} devices" ) print(f"Sampling start on GR{self.global_rank}") print('multi-gpu metrics for uniqueness is not accurate in the validation step.') generated_graphs = [] ident = 0 while samples_left_to_generate > 0: bs = self.cfg.train.batch_size * 2 to_generate = min(samples_left_to_generate, bs) to_save = min(samples_left_to_save, bs) chains_save = min(chains_left_to_save, bs) sampled_batch = self.sample_batch( batch_id=ident, batch_size=to_generate, save_final=to_save, keep_chain=chains_save, number_chain_steps=self.number_chain_steps, ) generated_graphs.append(sampled_batch) ident += to_generate samples_left_to_save -= to_save samples_left_to_generate -= to_generate chains_left_to_save -= chains_save generated_graphs = utils.concat_sparse_graphs(generated_graphs) print( f"Sampled {generated_graphs.batch.max().item()+1} batches on local rank {self.local_rank}. ", "Sampling took {time.time() - start:.2f} seconds\n" ) print("Computing sampling metrics...") self.val_sampling_metrics.compute_all_metrics( generated_graphs, self.current_epoch, local_rank=self.local_rank ) def on_test_epoch_start(self) -> None: print("Starting test...") if self.local_rank == 0: utils.setup_wandb( self.cfg ) # Initialize wandb only on one process to log metrics only once test_metrics = [self.test_nll, self.test_X_kl, self.test_E_kl, self.test_X_logp, self.test_E_logp, self.test_sampling_metrics] if self.use_charge: test_metrics.extend([self.test_charge_kl, self.test_charge_logp]) for metric in test_metrics: metric.reset() def test_step(self, data, i): pass def on_test_epoch_end(self) -> None: """Measure likelihood on a test set and compute stability metrics.""" if self.cfg.general.generated_path: self.print("Loading generated samples...") samples = np.load(self.cfg.general.generated_path) with open(self.cfg.general.generated_path, "rb") as f: samples = pickle.load(f) else: samples_left_to_generate = self.cfg.general.final_model_samples_to_generate samples_left_to_save = self.cfg.general.final_model_samples_to_save chains_left_to_save = self.cfg.general.final_model_chains_to_save # multi gpu operation samples_left_to_generate = math.ceil(samples_left_to_generate / max(self._trainer.num_devices, 1)) self.print( f"Samples to generate: {samples_left_to_generate} for each of the {max(self._trainer.num_devices, 1)} devices" ) print(f"Sampling start on GR{self.global_rank}") samples = [] id = 0 while samples_left_to_generate > 0: print( f"Samples left to generate: {samples_left_to_generate}/" f"{self.cfg.general.final_model_samples_to_generate}", end="", flush=True, ) bs = self.cfg.train.batch_size * 2 to_generate = min(samples_left_to_generate, bs) to_save = min(samples_left_to_save, bs) chains_save = min(chains_left_to_save, bs) sampled_batch = self.sample_batch( batch_id=id, batch_size=to_generate, num_nodes=None, save_final=to_save, keep_chain=chains_save, number_chain_steps=self.number_chain_steps, ) samples.append(sampled_batch) id += to_generate samples_left_to_save -= to_save samples_left_to_generate -= to_generate chains_left_to_save -= chains_save print("Saving the generated graphs") samples = 
utils.concat_sparse_graphs(samples) filename = f"generated_samples1.txt" # Save the samples list as pickle to a file that depends on the local rank # This is needed to avoid overwriting the same file on different GPUs with open(f"generated_samples_rank{self.local_rank}.pkl", "wb") as f: pickle.dump(samples, f) # This line is used to sync between gpus self._trainer.strategy.barrier() for i in range(2, 10): if os.path.exists(filename): filename = f"generated_samples{i}.txt" else: break with open(filename, "w") as f: for i in range(samples.batch.max().item() + 1): atoms = samples.node[samples.batch == i] f.write(f"N={atoms.shape[0]}\n") atoms = atoms.tolist() f.write("X: \n") for at in atoms: f.write(f"{at} ") f.write("\n") f.write("E: \n") bonds = samples.edge_attr[samples.batch[samples.edge_index[0]] == i] for bond in bonds: f.write(f"{bond} ") f.write("\n") print("Saved.") print("Computing sampling metrics...") # Load the pickles of the other GPUs samples = [] for i in range(self._trainer.num_devices): with open(f"generated_samples_rank{i}.pkl", "rb") as f: samples.append(pickle.load(f)) samples = utils.concat_sparse_graphs(samples) print('saving all samples') with open(f"generated_samples.pkl", "wb") as f: pickle.dump(samples, f) if self.test_variance == 1: to_log, _ = self.test_sampling_metrics.compute_all_metrics( samples, self.current_epoch, self.local_rank ) # save results for testing print('saving results for testing') current_path = os.getcwd() res_path = os.path.join( current_path, f"test_epoch{self.current_epoch}.json", ) with open(res_path, 'w') as file: # Convert the dictionary to a JSON string and write it to the file json.dump(to_log, file) else: to_log = {} for i in range(self.test_variance): start_idx = int(self.cfg.general.final_model_samples_to_generate / self.test_variance * i) end_idx = int(self.cfg.general.final_model_samples_to_generate / self.test_variance * (i + 1)) cur_samples = utils.split_samples(samples, start_idx, end_idx) cur_to_log, _ = self.test_sampling_metrics.compute_all_metrics(cur_samples, self.current_epoch, self.local_rank) if i == 0: to_log = {i: [cur_to_log[i]] for i in cur_to_log} else: to_log = {i: to_log[i].append(cur_to_log[i]) for i in cur_to_log} # get the variance and mean value of the metrics final_to_log = {i: [np.mean(i), np.var(i)] for i in to_log} to_log.update(final_to_log) # save results for testing print('saving results for testing') current_path = os.getcwd() res_path = os.path.join( current_path, f"test_epoch{self.current_epoch}_fold{self.test_variance}.json", ) with open(res_path, 'w') as file: # Convert the dictionary to a JSON string and write it to the file json.dump(to_log, file) print("Test sampling metrics computed.") def apply_sparse_noise(self, data): """Sample noise and apply it to the data.""" bs = int(data.batch.max() + 1) t_int = torch.randint( 1, self.T + 1, size=(bs, 1), device=self.device ).float() # (bs, 1) s_int = t_int - 1 t_float = t_int / self.T s_float = s_int / self.T # beta_t and alpha_s_bar are used for denoising/loss computation beta_t = self.noise_schedule(t_normalized=t_float) # (bs, 1) alpha_s_bar = self.noise_schedule.get_alpha_bar(t_normalized=s_float) # (bs, 1) alpha_t_bar = self.noise_schedule.get_alpha_bar(t_normalized=t_float) # (bs, 1) Qtb = self.transition_model.get_Qt_bar( alpha_t_bar, device=self.device ) # (bs, dx_in, dx_out), (bs, de_in, de_out) assert (abs(Qtb.X.sum(dim=2) - 1.0) < 1e-4).all(), Qtb.X.sum(dim=2) - 1 assert (abs(Qtb.E.sum(dim=2) - 1.0) < 1e-4).all() # Compute transition 
probabilities # get charge distribution if self.use_charge: prob_charge = data.charge.unsqueeze(1) @ Qtb.charge[data.batch] charge_t = prob_charge.squeeze(1).multinomial(1).flatten() # (N, ) charge_t = F.one_hot(charge_t, num_classes=self.out_dims.charge) else: charge_t = data.charge # Diffuse sparse nodes and sample sparse node labels probN = data.x.unsqueeze(1) @ Qtb.X[data.batch] # (N, dx) node_t = probN.squeeze(1).multinomial(1).flatten() # (N, ) # count node numbers and edge numbers for existing edges for each graph num_nodes = data.ptr.diff().long() batch_edge = data.batch[data.edge_index[0]] num_edges = torch.zeros(num_nodes.shape).to(self.device) unique, counts = torch.unique(batch_edge, sorted=True, return_counts=True) num_edges[unique] = counts.float() # count number of non-existing edges for each graph num_neg_edge = ((num_nodes - 1) * num_nodes - num_edges) / 2 # (bs, ) # Step1: diffuse on existing edges # get edges defined in the top triangle of the adjacency matrix dir_edge_index, dir_edge_attr = utils.undirected_to_directed( data.edge_index, data.edge_attr ) batch_edge = data.batch[dir_edge_index[0]] batch_Qtb = Qtb.E[batch_edge] probE = dir_edge_attr.unsqueeze(1) @ batch_Qtb dir_edge_attr = probE.squeeze(1).multinomial(1).flatten() # Step2: diffuse on non-existing edges # get number of new edges according to Qtb emerge_prob = Qtb.E[:, 0, 1:].sum(-1) # (bs, ) num_emerge_edges = ( torch.distributions.binomial.Binomial(num_neg_edge, emerge_prob) .sample() .int() ) # combine existing and non-existing edges (both are directed, i.e. triu) if num_emerge_edges.max() > 0: # sample non-existing edges neg_edge_index = sample_non_existing_edges_batched( num_edges_to_sample=num_emerge_edges, existing_edge_index=dir_edge_index, num_nodes=num_nodes, batch=data.batch, ) neg_edge_attr = sample_non_existing_edge_attr( query_edges_dist_batch=Qtb.E[:, 0, 1:], num_edges_to_sample=num_emerge_edges, ) E_t_attr = torch.hstack([dir_edge_attr, neg_edge_attr]) E_t_index = torch.hstack([dir_edge_index, neg_edge_index]) else: E_t_attr = dir_edge_attr E_t_index = dir_edge_index # mask non-existing edges mask = E_t_attr != 0 E_t_attr = E_t_attr[mask] E_t_index = E_t_index[:, mask] E_t_index, E_t_attr = utils.to_undirected(E_t_index, E_t_attr) E_t_attr = F.one_hot(E_t_attr, num_classes=self.out_dims.E) node_t = F.one_hot(node_t, num_classes=self.out_dims.X) sparse_noisy_data = { "t_int": t_int, "t_float": t_float, "beta_t": beta_t, "alpha_s_bar": alpha_s_bar, "alpha_t_bar": alpha_t_bar, "node_t": node_t, "edge_index_t": E_t_index, "edge_attr_t": E_t_attr, "comp_edge_index_t": None, "comp_edge_attr_t": None, # computational graph "y_t": data.y, "batch": data.batch, "ptr": data.ptr, "charge_t": charge_t, } return sparse_noisy_data def compute_val_loss(self, pred, noisy_data, X, E, y, node_mask, charge, test): """Computes an estimator for the variational lower bound. pred: (batch_size, n, total_features) noisy_data: dict X, E, y : (bs, n, dx), (bs, n, n, de), (bs, dy) node_mask : (bs, n) Output: nll (size 1) """ t = noisy_data["t_float"] # 1. N = node_mask.sum(1).long() log_pN = self.node_dist.log_prob(N) # 2. The KL between q(z_T | x) and p(z_T) = Uniform(1/num_classes). Should be close to zero. kl_prior = self.kl_prior(X, E, node_mask, charge=charge) # 3. 
Diffusion loss loss_all_t = self.compute_Lt( X, E, y, charge, pred, noisy_data, node_mask, test=test ) # Combine terms nlls = - log_pN + kl_prior + loss_all_t assert (~nlls.isnan()).all(), f"NLLs contain NaNs: {nlls}" assert len(nlls.shape) == 1, f"{nlls.shape} has more than only batch dim." # Update NLL metric object and return batch nll nll = (self.test_nll if test else self.val_nll)(nlls) # Average over the batch if wandb.run: wandb.log( { "kl prior": kl_prior.mean(), "Estimator loss terms": loss_all_t.mean(), "log_pn": log_pN.mean(), "val_nll": nll, "epoch": self.current_epoch }, commit=False, ) return nll def kl_prior(self, X, E, node_mask, charge): """Computes the KL between q(z1 | x) and the prior p(z1) = Normal(0, 1). This is essentially a lot of work for something that is in practice negligible in the loss. However, you compute it so that you see it when you've made a mistake in your noise schedule. """ # Compute the last alpha value, alpha_T. ones = torch.ones((X.size(0), 1), device=X.device) Ts = self.T * ones alpha_t_bar = self.noise_schedule.get_alpha_bar(t_int=Ts) # (bs, 1) Qtb = self.transition_model.get_Qt_bar(alpha_t_bar, self.device) # Compute transition probabilities probX = X @ Qtb.X # (bs, n, dx_out) probE = E @ Qtb.E.unsqueeze(1) # (bs, n, n, de_out) assert probX.shape == X.shape bs, n, _ = probX.shape limit_X = self.limit_dist.X[None, None, :].expand(bs, n, -1).type_as(probX) limit_E = ( self.limit_dist.E[None, None, None, :].expand(bs, n, n, -1).type_as(probE) ) if self.use_charge: prob_charge = charge @ Qtb.charge # (bs, n, de_out) limit_charge = ( self.limit_dist.charge[None, None, :] .expand(bs, n, -1) .type_as(prob_charge) ) limit_charge = limit_charge.clone() else: prob_charge = limit_charge = None # Make sure that masked rows do not contribute to the loss ( limit_dist_X, limit_dist_E, probX, probE, limit_dist_charge, prob_charge, ) = diffusion_utils.mask_distributions( true_X=limit_X.clone(), true_E=limit_E.clone(), pred_X=probX, pred_E=probE, node_mask=node_mask, true_charge=limit_charge, pred_charge=prob_charge, ) kl_distance_X = F.kl_div( input=probX.log(), target=limit_dist_X, reduction="none" ) kl_distance_E = F.kl_div( input=probE.log(), target=limit_dist_E, reduction="none" ) # not all edges are used for loss calculation E_mask = torch.logical_or( kl_distance_E.sum(-1).isnan(), kl_distance_E.sum(-1).isinf() ) kl_distance_E[E_mask] = 0 X_mask = torch.logical_or( kl_distance_X.sum(-1).isnan(), kl_distance_X.sum(-1).isinf() ) kl_distance_X[X_mask] = 0 loss = diffusion_utils.sum_except_batch( kl_distance_X ) + diffusion_utils.sum_except_batch(kl_distance_E) # The above code is using the Python debugger module `pdb` to set a breakpoint in the code. # When the code is executed, it will pause at this line and allow you to interactively debug # the program. 
if self.use_charge: kl_distance_charge = F.kl_div( input=prob_charge.log(), target=limit_dist_charge, reduction="none" ) kl_distance_charge[X_mask] = 0 loss = loss + diffusion_utils.sum_except_batch(kl_distance_charge) assert (~loss.isnan()).any() return loss def compute_Lt(self, X, E, y, charge, pred, noisy_data, node_mask, test): pred_probs_X = F.softmax(pred.X, dim=-1) pred_probs_E = F.softmax(pred.E, dim=-1) if self.use_charge: pred_probs_charge = F.softmax(pred.charge, dim=-1) else: pred_probs_charge = None charge = None Qtb = self.transition_model.get_Qt_bar(noisy_data["alpha_t_bar"], self.device) Qsb = self.transition_model.get_Qt_bar(noisy_data["alpha_s_bar"], self.device) Qt = self.transition_model.get_Qt(noisy_data["beta_t"], self.device) # Compute distributions to compare with KL bs, n, d = X.shape prob_true = diffusion_utils.posterior_distributions( X=X, E=E, X_t=noisy_data["X_t"], E_t=noisy_data["E_t"], charge=charge, charge_t=noisy_data["charge_t"], y_t=noisy_data["y_t"], Qt=Qt, Qsb=Qsb, Qtb=Qtb, ) prob_true.E = prob_true.E.reshape((bs, n, n, -1)) prob_pred = diffusion_utils.posterior_distributions( X=pred_probs_X, E=pred_probs_E, X_t=noisy_data["X_t"], E_t=noisy_data["E_t"], charge=pred_probs_charge, charge_t=noisy_data["charge_t"], y_t=noisy_data["y_t"], Qt=Qt, Qsb=Qsb, Qtb=Qtb, ) prob_pred.E = prob_pred.E.reshape((bs, n, n, -1)) # Reshape and filter masked rows ( prob_true_X, prob_true_E, prob_pred.X, prob_pred.E, prob_true.charge, prob_pred.charge, ) = diffusion_utils.mask_distributions( true_X=prob_true.X, true_E=prob_true.E, pred_X=prob_pred.X, pred_E=prob_pred.E, node_mask=node_mask, true_charge=prob_true.charge, pred_charge=prob_pred.charge, ) kl_x = (self.test_X_kl if test else self.val_X_kl)(prob_true_X, torch.log(prob_pred.X)) kl_e = (self.test_E_kl if test else self.val_E_kl)(prob_true_E, torch.log(prob_pred.E)) assert (~(kl_x + kl_e).isnan()).any() loss = kl_x + kl_e if self.use_charge: kl_charge = (self.test_charge_kl if test else self.val_charge_kl)( prob_true.charge, torch.log(prob_pred.charge) ) assert (~(kl_charge).isnan()).any() loss = loss + kl_charge return self.T * loss def reconstruction_logp(self, t, X, E, node_mask, charge): # Compute noise values for t = 0. 
t_zeros = torch.zeros_like(t) beta_0 = self.noise_schedule(t_zeros) Q0 = self.transition_model.get_Qt(beta_t=beta_0, device=self.device) probX0 = X @ Q0.X # (bs, n, dx_out) probE0 = E @ Q0.E.unsqueeze(1) # (bs, n, n, de_out) prob_charge0 = None if self.use_charge: prob_charge0 = charge @ Q0.charge sampled0 = diffusion_utils.sample_discrete_features( probX=probX0, probE=probE0, node_mask=node_mask, prob_charge=prob_charge0 ) X0 = F.one_hot(sampled0.X, num_classes=self.out_dims.X).float() E0 = F.one_hot(sampled0.E, num_classes=self.out_dims.E).float() y0 = sampled0.y assert (X.shape == X0.shape) and (E.shape == E0.shape) charge0 = X0.new_zeros((*X0.shape[:-1], 0)) if self.use_charge: charge0 = F.one_hot( sampled0.charge, num_classes=self.out_dims.charge ).float() sampled_0 = utils.PlaceHolder(X=X0, E=E0, y=y0, charge=charge0).mask(node_mask) # Predictions noisy_data = { "X_t": sampled_0.X, "E_t": sampled_0.E, "y_t": sampled_0.y, "node_mask": node_mask, "t_int": torch.zeros((X0.shape[0], 1), dtype=torch.long).to(self.device), "t_float": torch.zeros((X0.shape[0], 1), dtype=torch.float).to(self.device), "charge_t": sampled_0.charge, } sparse_noisy_data = utils.to_sparse( noisy_data["X_t"], noisy_data["E_t"], noisy_data["y_t"], node_mask, charge=noisy_data["charge_t"], ) noisy_data.update(sparse_noisy_data) noisy_data["comp_edge_index_t"] = sparse_noisy_data["edge_index_t"] noisy_data["comp_edge_attr_t"] = sparse_noisy_data["edge_attr_t"] pred0 = self.forward(noisy_data) pred0, _ = utils.to_dense( pred0.node, pred0.edge_index, pred0.edge_attr, pred0.batch, pred0.charge ) # Normalize predictions probX0 = F.softmax(pred0.X, dim=-1) probE0 = F.softmax(pred0.E, dim=-1) # Set masked rows to arbitrary values that don't contribute to loss probX0[~node_mask] = torch.ones(self.out_dims.X).type_as(probX0) probE0[~(node_mask.unsqueeze(1) * node_mask.unsqueeze(2))] = torch.ones( self.out_dims.E ).type_as(probE0) diag_mask = torch.eye(probE0.size(1)).type_as(probE0).bool() diag_mask = diag_mask.unsqueeze(0).expand(probE0.size(0), -1, -1) probE0[diag_mask] = torch.ones(self.out_dims.E).type_as(probE0) assert (~probX0.isnan()).any() assert (~probE0.isnan()).any() prob_charge0 = charge if self.use_charge: prob_charge0 = F.softmax(pred0.charge, dim=-1) prob_charge0[~node_mask] = torch.ones(self.out_dims.charge).type_as( prob_charge0 ) assert (~prob_charge0.isnan()).any() return utils.PlaceHolder(X=probX0, E=probE0, y=None, charge=prob_charge0) def forward_sparse(self, sparse_noisy_data): start_time = time.time() node = sparse_noisy_data["node_t"] edge_attr = sparse_noisy_data["edge_attr_t"].float() edge_index = sparse_noisy_data["edge_index_t"].to(torch.int64) y = sparse_noisy_data["y_t"] batch = sparse_noisy_data["batch"].long() if hasattr(self, "forward_time"): self.forward_time.append(round(time.time() - start_time, 2)) return self.model(node, edge_attr, edge_index, y, batch) def forward(self, noisy_data): """ noisy data contains: node_t, comp_edge_index_t, comp_edge_attr_t, batch """ # build the sparse_noisy_data for the forward function of the sparse model start_time = time.time() sparse_noisy_data = self.compute_extra_data(sparse_noisy_data=noisy_data) if self.sign_net and self.cfg.model.extra_features == "all": x = self.sign_net( sparse_noisy_data["node_t"], sparse_noisy_data["edge_index_t"], sparse_noisy_data["batch"], ) sparse_noisy_data["node_t"] = torch.hstack( [sparse_noisy_data["node_t"], x] ) if hasattr(self, "extra_data_time"): self.extra_data_time.append(round(time.time() - start_time, 2)) return 
self.forward_sparse(sparse_noisy_data) @torch.no_grad() def sample_batch( self, batch_id: int, batch_size: int, keep_chain: int, number_chain_steps: int, save_final: int, num_nodes=None, ): """ :param batch_id: int :param batch_size: int :param num_nodes: int, <int>tensor (batch_size) (optional) for specifying number of nodes :param save_final: int: number of predictions to save to file :param keep_chain: int: number of chains to save to file :param keep_chain_steps: number of timesteps to save for each chain :return: molecule_list. Each element of this list is a tuple (node_types, charge, positions) """ if num_nodes is None: num_nodes = self.node_dist.sample_n(batch_size, self.device) elif type(num_nodes) == int: num_nodes = num_nodes * torch.ones( batch_size, device=self.device, dtype=torch.int ) else: assert isinstance(num_nodes, torch.Tensor) num_nodes = num_nodes num_max = torch.max(num_nodes) # Build the masks arange = ( torch.arange(num_max, device=self.device) .unsqueeze(0) .expand(batch_size, -1) ) node_mask = arange < num_nodes.unsqueeze(1) # Sample noise -- z has size ( num_samples, num_nodes, num_features) sparse_sampled_data = diffusion_utils.sample_sparse_discrete_feature_noise( limit_dist=self.limit_dist, node_mask=node_mask ) assert number_chain_steps < self.T chain = utils.SparseChainPlaceHolder(keep_chain=keep_chain) # Iteratively sample p(z_s | z_t) for t = 1, ..., T, with s = t - 1. for s_int in tqdm(reversed(range(self.T)), total=self.T): s_array = (s_int * torch.ones((batch_size, 1))).to(self.device) t_array = s_array + 1 s_norm = s_array / self.T t_norm = t_array / self.T # Sample z_s sparse_sampled_data = self.sample_p_zs_given_zt( s_norm, t_norm, sparse_sampled_data ) # keep_chain can be very small, e.g., 1 if ((s_int * number_chain_steps) % self.T == 0) and (keep_chain != 0): chain.append(sparse_sampled_data) # get generated graphs generated_graphs = sparse_sampled_data.to_device("cpu") generated_graphs.edge_attr = sparse_sampled_data.edge_attr.argmax(-1) generated_graphs.node = sparse_sampled_data.node.argmax(-1) if self.use_charge: generated_graphs.charge = sparse_sampled_data.charge.argmax(-1) - 1 if self.visualization_tools is not None: current_path = os.getcwd() # Visualize chains if keep_chain > 0: print("Visualizing chains...") chain_path = os.path.join( current_path, f"chains/{self.cfg.general.name}/" f"epoch{self.current_epoch}/", ) try: _ = self.visualization_tools.visualize_chain( chain_path, batch_id, chain, local_rank=self.local_rank ) except OSError: print("Warn: image chains failed to be visualized ") # Visualize the final molecules print("\nVisualizing molecules...") result_path = os.path.join( current_path, f"graphs/{self.name}/epoch{self.current_epoch}_b{batch_id}/", ) try: self.visualization_tools.visualize( result_path, generated_graphs, save_final, local_rank=self.local_rank, ) except OSError: print("Warn: image failed to be visualized ") print("Done.") return generated_graphs def sample_node(self, pred_X, p_s_and_t_given_0_X, node_mask): # Normalize predictions pred_X = F.softmax(pred_X, dim=-1) # bs, n, d0 # Dim of these two tensors: bs, N, d0, d_t-1 weighted_X = pred_X.unsqueeze(-1) * p_s_and_t_given_0_X # bs, n, d0, d_t-1 unnormalized_prob_X = weighted_X.sum(dim=2) # bs, n, d_t-1 unnormalized_prob_X[torch.sum(unnormalized_prob_X, dim=-1) == 0] = 1e-5 prob_X = unnormalized_prob_X / torch.sum( unnormalized_prob_X, dim=-1, keepdim=True ) # bs, n, d_t assert ((prob_X.sum(dim=-1) - 1).abs() < 1e-4).all() X_t = 
diffusion_utils.sample_discrete_node_features(prob_X, node_mask) return X_t, prob_X def sample_edge(self, pred_E, p_s_and_t_given_0_E, node_mask): # Normalize predictions bs, n, n, de = pred_E.shape pred_E = F.softmax(pred_E, dim=-1) # bs, n, n, d0 pred_E = pred_E.reshape((bs, -1, pred_E.shape[-1])) weighted_E = pred_E.unsqueeze(-1) * p_s_and_t_given_0_E # bs, N, d0, d_t-1 unnormalized_prob_E = weighted_E.sum(dim=-2) unnormalized_prob_E[torch.sum(unnormalized_prob_E, dim=-1) == 0] = 1e-5 prob_E = unnormalized_prob_E / torch.sum( unnormalized_prob_E, dim=-1, keepdim=True ) prob_E = prob_E.reshape(bs, n, n, de) assert ((prob_E.sum(dim=-1) - 1).abs() < 1e-4).all() E_t = diffusion_utils.sample_discrete_edge_features(prob_E, node_mask) return E_t, prob_E def sample_node_edge( self, pred, p_s_and_t_given_0_X, p_s_and_t_given_0_E, node_mask ): _, prob_X = self.sample_node(pred.X, p_s_and_t_given_0_X, node_mask) _, prob_E = self.sample_edge(pred.E, p_s_and_t_given_0_E, node_mask) sampled_s = diffusion_utils.sample_discrete_features( prob_X, prob_E, node_mask=node_mask ) return sampled_s def sample_sparse_node(self, pred_node, p_s_and_t_given_0_X): # Normalize predictions pred_X = F.softmax(pred_node, dim=-1) # N, dx # Dim of the second tensor: N, dx, dx weighted_X = pred_X.unsqueeze(-1) * p_s_and_t_given_0_X # N, dx, dx unnormalized_prob_X = weighted_X.sum(dim=1) # N, dx unnormalized_prob_X[ torch.sum(unnormalized_prob_X, dim=-1) == 0 ] = 1e-5 # TODO: delete/masking? prob_X = unnormalized_prob_X / torch.sum( unnormalized_prob_X, dim=-1, keepdim=True ) # N, dx assert ((prob_X.sum(dim=-1) - 1).abs() < 1e-4).all() X_t = prob_X.multinomial(1)[:, 0] return X_t def sample_sparse_edge(self, pred_edge, p_s_and_t_given_0_E): # Normalize predictions pred_E = F.softmax(pred_edge, dim=-1) # N, d0 # Dim of the second tensor: N, d0, dt-1 weighted_E = pred_E.unsqueeze(-1) * p_s_and_t_given_0_E # N, d0, dt-1 unnormalized_prob_E = weighted_E.sum(dim=1) # N, dt-1 unnormalized_prob_E[torch.sum(unnormalized_prob_E, dim=-1) == 0] = 1e-5 prob_E = unnormalized_prob_E / torch.sum( unnormalized_prob_E, dim=-1, keepdim=True ) assert ((prob_E.sum(dim=-1) - 1).abs() < 1e-4).all() E_t = prob_E.multinomial(1)[:, 0] return E_t def sample_sparse_node_edge( self, pred_node, pred_edge, p_s_and_t_given_0_X, p_s_and_t_given_0_E, pred_charge, p_s_and_t_given_0_charge, ): sampled_node = self.sample_sparse_node(pred_node, p_s_and_t_given_0_X).long() sampled_edge = self.sample_sparse_edge(pred_edge, p_s_and_t_given_0_E).long() if pred_charge.size(-1) > 0: sampled_charge = self.sample_sparse_node( pred_charge, p_s_and_t_given_0_charge ).long() else: sampled_charge = pred_charge return sampled_node, sampled_edge, sampled_charge def sample_p_zs_given_zt(self, s_float, t_float, data): """ Samples from zs ~ p(zs | zt). Only used during sampling. 
if last_step, return the graph prediction as well """ node = data.node edge_index = data.edge_index edge_attr = data.edge_attr y = data.y charge = data.charge ptr = data.ptr batch = data.batch beta_t = self.noise_schedule(t_normalized=t_float) # (bs, 1) alpha_s_bar = self.noise_schedule.get_alpha_bar(t_normalized=s_float) alpha_t_bar = self.noise_schedule.get_alpha_bar(t_normalized=t_float) # Retrieve transitions matrix Qtb = self.transition_model.get_Qt_bar(alpha_t_bar, self.device) Qsb = self.transition_model.get_Qt_bar(alpha_s_bar, self.device) Qt = self.transition_model.get_Qt(beta_t, self.device) # Prior distribution # (N, dx, dx) p_s_and_t_given_0_X = ( diffusion_utils.compute_sparse_batched_over0_posterior_distribution( input_data=node, batch=batch, Qt=Qt.X, Qsb=Qsb.X, Qtb=Qtb.X ) ) p_s_and_t_given_0_charge = None if self.use_charge: p_s_and_t_given_0_charge = ( diffusion_utils.compute_sparse_batched_over0_posterior_distribution( input_data=charge, batch=batch, Qt=Qt.charge, Qsb=Qsb.charge, Qtb=Qtb.charge, ) ) # prepare sparse information num_nodes = ptr.diff().long() num_edges = (num_nodes * (num_nodes - 1) / 2).long() # If we had one graph, we will iterate on all edges for each step # we also make sure that the non existing edge number remains the same with the training process ( all_condensed_index, all_edge_batch, all_edge_mask, ) = sampled_condensed_indices_uniformly( max_condensed_value=num_edges, num_edges_to_sample=num_edges, return_mask=True, ) # double checked # number of edges used per loop for each graph num_edges_per_loop = torch.ceil(self.edge_fraction * num_edges) # (bs, ) len_loop = math.ceil(1. / self.edge_fraction) new_edge_index, new_edge_attr, new_charge = ( torch.zeros((2, 0), device=self.device, dtype=torch.long), torch.zeros(0, device=self.device), torch.zeros(0, device=self.device, dtype=torch.long), ) # create the new data for calculation sparse_noisy_data = { "node_t": node, "edge_index_t": edge_index, "edge_attr_t": edge_attr, "batch": batch, "y_t": y, "ptr": ptr, "charge_t": charge, "t_int": (t_float * self.T).int(), "t_float": t_float, } for i in range(len_loop): if self.autoregressive and i != 0: sparse_noisy_data["edge_index_t"] = new_edge_index sparse_noisy_data["edge_attr_t"] = new_edge_attr # the last loop might have less edges, we need to make sure that each loop has the same number of edges if i == len_loop - 1: edges_to_consider_mask = all_edge_mask >= ( num_edges[all_edge_batch] - num_edges_per_loop[all_edge_batch] ) else: # [0, 3, 2, 1, 0, 3, 2, 1, 0, 3, 2, 1] # all_condensed_index is not sorted inside the graph, but it sorted for graph batch edges_to_consider_mask = torch.logical_and( all_edge_mask >= num_edges_per_loop[all_edge_batch] * i, all_edge_mask < num_edges_per_loop[all_edge_batch] * (i + 1), ) # get query edges and pass to matrix index triu_query_edge_index = all_condensed_index[edges_to_consider_mask] query_edge_batch = all_edge_batch[edges_to_consider_mask]
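The kl_prior method in the record above measures how far the forward-noised node and edge distributions at t = T have drifted from the model's limit distribution, passing log-probabilities as `input` to F.kl_div. A minimal standalone sketch of that convention, with toy tensors and an assumed uniform limit distribution in place of the repo's transition model and diffusion_utils helpers:

import torch
import torch.nn.functional as F

# Toy stand-ins: batch of 2 graphs, 3 nodes each, 4 node classes (shapes assumed for illustration).
bs, n, dx = 2, 3, 4
probX = torch.softmax(torch.randn(bs, n, dx), dim=-1)  # plays the role of X @ Qtb.X at t = T
limit_X = torch.full((bs, n, dx), 1.0 / dx)            # assumed uniform limit distribution

# F.kl_div takes log-probabilities as `input` and probabilities as `target`;
# elementwise it returns target * (log(target) - input), i.e. the terms of KL(target || q).
kl_elementwise = F.kl_div(input=probX.log(), target=limit_X, reduction="none")  # (bs, n, dx)

# Reduce everything except the batch dimension, mirroring sum_except_batch in the code above.
kl_per_graph = kl_elementwise.sum(dim=(1, 2))  # (bs,)
print(kl_per_graph)

The repo code additionally zeroes out entries that come back NaN or Inf from masked rows before this reduction; the sketch skips that step because its toy target has no zero entries.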
triu_query_edge_index = condensed_to_matrix_index_batch(
5
2023-10-30 12:12:16+00:00
16k
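In the sampling loop of the record above, sample_p_zs_given_zt combines the network's clean-graph prediction with precomputed posterior terms, and sample_sparse_node turns the result into one categorical draw per node: weight the predicted p(x_0) by p(x_s, x_t | x_0), marginalize over x_0, normalize, then sample. A minimal sketch of that step with toy tensors (the shapes and the random posterior stand-in are assumptions, not the repo's diffusion_utils output):

import torch
import torch.nn.functional as F

N, d = 5, 4                                           # N nodes flattened across the batch, d classes
pred_node_logits = torch.randn(N, d)                  # stand-in network output for x_0
p_s_and_t_given_0 = torch.rand(N, d, d)               # stand-in posterior terms, normalized below
p_s_and_t_given_0 = p_s_and_t_given_0 / p_s_and_t_given_0.sum(-1, keepdim=True)

pred_X = F.softmax(pred_node_logits, dim=-1)          # (N, d): predicted p(x_0 | x_t)
weighted = pred_X.unsqueeze(-1) * p_s_and_t_given_0   # (N, d, d): weight each x_0 hypothesis
unnorm = weighted.sum(dim=1)                          # (N, d): marginalize over x_0
unnorm[unnorm.sum(-1) == 0] = 1e-5                    # guard against all-zero rows, as in the code above
prob = unnorm / unnorm.sum(dim=-1, keepdim=True)      # (N, d): p(x_s | x_t)

assert ((prob.sum(-1) - 1).abs() < 1e-4).all()
x_s = prob.multinomial(1)[:, 0]                       # one categorical sample per node
print(x_s)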
akekic/causal-component-analysis
experiments/nonparam_ident/main.py
[ { "identifier": "DGP", "path": "config.py", "snippet": "DGP = {\n \"graph-4-0\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 1, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 128, # D\n },\n \"graph-4-1\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 128, # D\n },\n \"graph-4-2\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 1, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0, 0],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 75_000,\n \"observation_dim\": 128, # D\n },\n \"graph-4-3\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0, 0],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 75_000,\n \"observation_dim\": 128, # D\n },\n \"graph-4-4\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 1, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0, 0],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 12_500,\n \"observation_dim\": 128, # D\n },\n \"graph-4-5\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 0, 0, 0], [1, 0, 1, 0]]\n ),\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0, 0],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 75_000,\n \"observation_dim\": 128, # D\n },\n \"graph-4-6\": {\n \"num_causal_variables\": 10, # N\n \"adj_matrix\": np.array(\n [\n [0, 1, 1, 1, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 1, 1, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 
0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 1, 1, 0],\n ]\n ),\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [1, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 75_000,\n \"observation_dim\": 128, # D\n },\n \"graph-4-7\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 1, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0, 0],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 100_000,\n \"observation_dim\": 4, # D\n },\n \"graph-4-8\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0, 0],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 4, # D\n },\n \"graph-4-9\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 1, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 4, # D\n },\n \"graph-4-10\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 4, # D\n },\n \"graph-4-9-local\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 1, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 2_000,\n \"observation_dim\": 4, # D\n },\n \"graph-4-random-1\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": None,\n \"edge_prob\": 0.5,\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 4, # D\n },\n \"graph-4-random-p000\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 4, # D\n },\n \"graph-4-random-p025\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": None,\n \"edge_prob\": 0.25,\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 4, # D\n },\n \"graph-4-random-p050\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": None,\n \"edge_prob\": 0.5,\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 4, # D\n },\n \"graph-4-random-p075\": {\n 
\"num_causal_variables\": 4, # N\n \"adj_matrix\": None,\n \"edge_prob\": 0.75,\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 4, # D\n },\n \"graph-4-random-p100\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": None,\n \"edge_prob\": 1.0,\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 4, # D\n },\n \"graph-4-random-1-local\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": None,\n \"edge_prob\": 0.5,\n \"int_targets\": torch.tensor(\n [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n ),\n \"num_samples_per_env\": 2_000,\n \"observation_dim\": 4, # D\n },\n \"graph-7-random-1\": {\n \"num_causal_variables\": 7, # N\n \"adj_matrix\": None,\n \"edge_prob\": 0.5,\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0, 0, 0, 0, 0],\n [1, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 7, # D\n },\n \"graph-7-random-1-local\": {\n \"num_causal_variables\": 7, # N\n \"adj_matrix\": None,\n \"edge_prob\": 0.5,\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0, 0, 0, 0, 0],\n [1, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 2_000,\n \"observation_dim\": 7, # D\n },\n \"graph-2-1\": {\n \"num_causal_variables\": 2, # N\n \"adj_matrix\": np.array([[0, 1], [0, 0]]),\n \"int_targets\": torch.tensor(\n [\n [0, 0],\n [1, 0],\n [0, 1],\n ]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 2, # D\n },\n \"graph-2-2\": {\n \"num_causal_variables\": 2, # N\n \"adj_matrix\": np.array([[0, 0], [0, 0]]),\n \"int_targets\": torch.tensor(\n [\n [0, 0],\n [1, 0],\n [0, 1],\n ]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 2, # D\n },\n \"graph-3-1\": {\n \"num_causal_variables\": 3, # N\n \"adj_matrix\": np.array([[0, 1, 1], [0, 0, 0], [0, 0, 0]]),\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0],\n [1, 0, 0],\n [0, 1, 0],\n [0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 3, # D\n },\n \"graph-3-random-1\": {\n \"num_causal_variables\": 3, # N\n \"adj_matrix\": None,\n \"edge_prob\": 0.5,\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0],\n [1, 0, 0],\n [0, 1, 0],\n [0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 3, # D\n },\n \"graph-5-random-1\": {\n \"num_causal_variables\": 5, # N\n \"adj_matrix\": None,\n \"edge_prob\": 0.5,\n \"int_targets\": torch.tensor(\n [\n [0, 0, 0, 0, 0],\n [1, 0, 0, 0, 0],\n [0, 1, 0, 0, 0],\n [0, 0, 1, 0, 0],\n [0, 0, 0, 1, 0],\n [0, 0, 0, 0, 1],\n ]\n ),\n \"num_samples_per_env\": 200_000,\n \"observation_dim\": 5, # D\n },\n}" }, { "identifier": "MultiEnvDataModule", "path": "data_generator/data_module.py", "snippet": "class MultiEnvDataModule(LightningDataModule):\n \"\"\"\n Data module for multi-environment data.\n\n Attributes\n ----------\n medgp: MultiEnvDGP\n Multi-environment data generating process.\n num_samples_per_env: int\n Number of samples per environment.\n batch_size: int\n Batch size.\n num_workers: int\n Number of workers for the data loaders.\n 
intervention_targets_per_env: Tensor, shape (num_envs, num_causal_variables)\n Intervention targets per environment, with 1 indicating that the variable is intervened on.\n log_dir: Optional[Path]\n Directory to save summary statistics and plots to. Default: None.\n intervention_target_misspec: bool\n Whether to misspecify the intervention targets. If true, the intervention targets are permuted.\n I.e. the model received the wrong intervention targets. Default: False.\n intervention_target_perm: Optional[list[int]]\n Permutation of the intervention targets. If None, a random permutation is used. Only used if\n intervention_target_misspec is True. Default: None.\n\n Methods\n -------\n setup(stage=None) -> None\n Setup the data module. This is where the data is sampled.\n train_dataloader() -> DataLoader\n Return the training data loader.\n val_dataloader() -> DataLoader\n Return the validation data loader.\n test_dataloader() -> DataLoader\n Return the test data loader.\n \"\"\"\n\n def __init__(\n self,\n multi_env_dgp: MultiEnvDGP,\n num_samples_per_env: int,\n batch_size: int,\n num_workers: int,\n intervention_targets_per_env: Tensor,\n log_dir: Optional[Path] = None,\n intervention_target_misspec: bool = False,\n intervention_target_perm: Optional[list[int]] = None,\n ) -> None:\n super().__init__()\n self.medgp = multi_env_dgp\n self.num_samples_per_env = num_samples_per_env\n self.batch_size = batch_size\n self.num_workers = num_workers\n self.intervention_targets_per_env = intervention_targets_per_env\n self.log_dir = log_dir\n\n self.intervention_target_misspec = intervention_target_misspec\n latent_dim = self.medgp.latent_scm.latent_dim\n assert (\n intervention_target_perm is None\n or len(intervention_target_perm) == latent_dim\n )\n self.intervention_target_perm = intervention_target_perm\n\n def setup(self, stage: Optional[str] = None) -> None:\n latent_dim = self.medgp.latent_scm.latent_dim\n num_envs = self.intervention_targets_per_env.shape[0]\n\n x, v, u, e, intervention_targets, log_prob = self.medgp.sample(\n self.num_samples_per_env,\n intervention_targets_per_env=self.intervention_targets_per_env,\n )\n if self.intervention_target_misspec:\n assert (\n num_envs == latent_dim + 1\n ), \"only works if num_envs == num_causal_variables + 1\"\n if self.intervention_target_perm is None:\n perm = random_perm(latent_dim)\n self.intervention_target_perm = perm\n else:\n perm = self.intervention_target_perm\n\n # remember where old targets were\n idx_mask_list = []\n for i in range(latent_dim):\n idx_mask = intervention_targets[:, i] == 1\n idx_mask_list.append(idx_mask)\n intervention_targets[idx_mask, i] = 0\n\n # permute targets\n for i in range(latent_dim):\n intervention_targets[idx_mask_list[i], perm[i]] = 1\n\n dataset = TensorDataset(x, v, u, e, intervention_targets, log_prob)\n train_size = int(0.8 * len(dataset))\n val_size = int(0.5 * (len(dataset) - train_size))\n test_size = len(dataset) - train_size - val_size\n (\n self.train_dataset,\n self.val_dataset,\n self.test_dataset,\n ) = torch.utils.data.random_split(dataset, [train_size, val_size, test_size])\n\n if self.log_dir is not None:\n self.log_dir.mkdir(parents=True, exist_ok=True)\n summary_stats = summary_statistics(x, v, e, intervention_targets)\n for key, value in summary_stats.items():\n value.to_csv(self.log_dir / f\"{key}_summary_stats.csv\")\n plot_dag(self.medgp.adjacency_matrix, self.log_dir)\n try:\n with open(self.log_dir / \"base_coeff_values.txt\", \"w\") as f:\n 
f.write(str(self.medgp.latent_scm.base_coeff_values))\n except AttributeError:\n pass\n # save mixing function coefficients\n self.medgp.mixing_function.save_coeffs(self.log_dir)\n\n def train_dataloader(self) -> DataLoader:\n return DataLoader(\n self.train_dataset,\n batch_size=self.batch_size,\n shuffle=True,\n num_workers=self.num_workers,\n )\n\n def val_dataloader(self) -> DataLoader:\n val_loader = DataLoader(\n self.val_dataset,\n batch_size=self.batch_size,\n shuffle=True,\n num_workers=self.num_workers,\n )\n return val_loader\n\n def test_dataloader(self) -> DataLoader:\n test_loader = DataLoader(\n self.test_dataset,\n batch_size=self.batch_size,\n shuffle=False,\n num_workers=self.num_workers,\n )\n return test_loader" }, { "identifier": "make_multi_env_dgp", "path": "data_generator/multi_env_gdp.py", "snippet": "def make_multi_env_dgp(\n latent_dim: int,\n observation_dim: int,\n adjacency_matrix: np.ndarray,\n intervention_targets_per_env: Tensor,\n shift_noise: bool = True,\n noise_shift_type: str = \"mean\",\n mixing: str = \"nonlinear\",\n scm: str = \"linear\",\n n_nonlinearities: int = 1,\n scm_coeffs_low: float = -1,\n scm_coeffs_high: float = 1,\n coeffs_min_abs_value: float = None,\n edge_prob: float = None,\n snr: float = 1.0,\n) -> MultiEnvDGP:\n \"\"\"\n Create a multi-environment data generating process (DGP).\n\n Parameters\n ----------\n latent_dim: int\n Dimension of the latent variables.\n observation_dim: int\n Dimension of the observed variables.\n adjacency_matrix: np.ndarray, shape (latent_dim, latent_dim)\n Adjacency matrix of the latent SCM.\n intervention_targets_per_env: Tensor, shape (num_envs, latent_dim)\n Intervention targets per environment, with 1 indicating that the variable is intervened on\n and 0 indicating that the variable is not intervened on. This variable also implicitly defines\n the number of environments.\n shift_noise: bool\n Whether to shift the noise distribution for variables that are intervened on. Default: False.\n noise_shift_type: str\n Whether to shift the mean or standard deviation of the noise distribution for variables that are intervened on.\n Options: \"mean\" or \"std\". Default: \"mean\".\n mixing: str\n Mixing function. Options: \"linear\" or \"nonlinear\". Default: \"nonlinear\".\n scm: str\n Latent SCM. Options: \"linear\" or \"location-scale\". Default: \"linear\".\n n_nonlinearities: int\n Number of nonlinearities in the nonlinear mixing function. Default: 1.\n scm_coeffs_low: float\n Lower bound of the SCM coefficients in linear SCMs. Default: -1.\n scm_coeffs_high: float\n Upper bound of the SCM coefficients in linear SCMs. Default: 1.\n coeffs_min_abs_value: float\n Minimum absolute value of the SCM coefficients in linear SCMs. If None, no minimum absolute value is enforced.\n Default: None.\n edge_prob: float\n Probability of an edge in the adjacency matrix if no adjacency matrix is given. Default: None.\n snr: float\n Signal-to-noise ratio of the location-scale SCM. 
Default: 1.0.\n\n Returns\n -------\n medgp: MultiEnvDGP\n Multi-environment data generating process.\n \"\"\"\n if mixing == \"linear\":\n mixing_function = LinearMixing(\n latent_dim=latent_dim, observation_dim=observation_dim\n )\n elif mixing == \"nonlinear\":\n mixing_function = NonlinearMixing(\n latent_dim=latent_dim,\n observation_dim=observation_dim,\n n_nonlinearities=n_nonlinearities,\n )\n else:\n raise ValueError(f\"Unknown mixing function {mixing}\")\n\n # if adjacency_matrix is not given as numpy array, sample a random one\n if not isinstance(adjacency_matrix, np.ndarray):\n assert (\n edge_prob is not None\n ), \"edge_prob must be given if no adjacency_matrix is given\"\n adjacency_matrix = sample_random_dag(latent_dim, edge_prob)\n adjacency_matrix = adjacency_matrix\n\n if scm == \"linear\":\n latent_scm = LinearSCM(\n adjacency_matrix=adjacency_matrix,\n latent_dim=latent_dim,\n intervention_targets_per_env=intervention_targets_per_env,\n coeffs_low=scm_coeffs_low,\n coeffs_high=scm_coeffs_high,\n coeffs_min_abs_value=coeffs_min_abs_value,\n )\n elif scm == \"location-scale\":\n latent_scm = LocationScaleSCM(\n adjacency_matrix=adjacency_matrix,\n latent_dim=latent_dim,\n intervention_targets_per_env=intervention_targets_per_env,\n snr=snr,\n )\n else:\n raise ValueError(f\"Unknown SCM {scm}\")\n\n noise_generator = GaussianNoise(\n latent_dim=latent_dim,\n intervention_targets_per_env=intervention_targets_per_env,\n shift=shift_noise,\n shift_type=noise_shift_type,\n )\n medgp = MultiEnvDGP(\n latent_scm=latent_scm,\n noise_generator=noise_generator,\n mixing_function=mixing_function,\n )\n return medgp" }, { "identifier": "LinearCauCAModel", "path": "model/cauca_model.py", "snippet": "class LinearCauCAModel(CauCAModel):\n \"\"\"\n CauCA model with linear unmixing function.\n \"\"\"\n\n def __init__(\n self,\n latent_dim: int,\n adjacency_matrix: np.ndarray,\n intervention_targets_per_env: Tensor,\n lr: float = 1e-2,\n weight_decay: float = 0,\n lr_scheduler: Optional[str] = None,\n lr_min: float = 0.0,\n adjacency_misspecified: bool = False,\n fix_mechanisms: bool = True,\n nonparametric_base_distr: bool = False,\n ) -> None:\n super().__init__(\n latent_dim=latent_dim,\n adjacency_matrix=adjacency_matrix,\n lr=lr,\n weight_decay=weight_decay,\n lr_scheduler=lr_scheduler,\n lr_min=lr_min,\n adjacency_misspecified=adjacency_misspecified,\n )\n self.encoder = LinearCauCAEncoder(\n latent_dim,\n self.adjacency_matrix, # this is the misspecified adjacency matrix if adjacency_misspecified=True\n intervention_targets_per_env=intervention_targets_per_env,\n fix_mechanisms=fix_mechanisms,\n nonparametric_base_distr=nonparametric_base_distr,\n )\n self.save_hyperparameters()" }, { "identifier": "NaiveNonlinearModel", "path": "model/cauca_model.py", "snippet": "class NaiveNonlinearModel(CauCAModel):\n \"\"\"\n Naive CauCA model with nonlinear unmixing function. 
It assumes no causal dependencies.\n \"\"\"\n\n def __init__(\n self,\n latent_dim: int,\n adjacency_matrix: np.ndarray,\n lr: float = 1e-2,\n weight_decay: float = 0,\n lr_scheduler: Optional[str] = None,\n lr_min: float = 0.0,\n adjacency_misspecified: bool = False,\n k_flows: int = 1,\n intervention_targets_per_env: Optional[torch.Tensor] = None,\n net_hidden_dim: int = 128,\n net_hidden_layers: int = 3,\n ) -> None:\n super().__init__(\n latent_dim=latent_dim,\n adjacency_matrix=adjacency_matrix,\n lr=lr,\n weight_decay=weight_decay,\n lr_scheduler=lr_scheduler,\n lr_min=lr_min,\n adjacency_misspecified=adjacency_misspecified,\n )\n self.encoder = NaiveEncoder(\n latent_dim,\n self.adjacency_matrix, # this is the misspecified adjacency matrix if adjacency_misspecified=True\n K=k_flows,\n intervention_targets_per_env=intervention_targets_per_env,\n net_hidden_dim=net_hidden_dim,\n net_hidden_layers=net_hidden_layers,\n )\n self.save_hyperparameters()" }, { "identifier": "NonlinearCauCAModel", "path": "model/cauca_model.py", "snippet": "class NonlinearCauCAModel(CauCAModel):\n \"\"\"\n CauCA model with nonlinear unmixing function.\n\n Additional attributes\n ---------------------\n k_flows : int\n Number of flows to use in the nonlinear unmixing function. Default: 1.\n net_hidden_dim : int\n Hidden dimension of the neural network used in the nonlinear unmixing function. Default: 128.\n net_hidden_layers : int\n Number of hidden layers of the neural network used in the nonlinear unmixing function. Default: 3.\n fix_mechanisms : bool\n Some mechanisms can be fixed to a simple gaussian distribution without loss of generality.\n This has only an effect for the parametric base distribution. If True, these mechanisms are fixed.\n Default: True.\n fix_all_intervention_targets : bool\n When fixable mechanisms are fixed, this parameter determines whether all intervention targets\n are fixed (option 1) or all intervention targets which are non-root nodes together with all\n non-intervened root nodes (option 2). See documentation of ParamMultiEnvCausalDistribution\n for more details. Default: False.\n nonparametric_base_distr : bool\n Whether to use a nonparametric base distribution for the flows. If false, a parametric linear\n gaussian causal base distribution is used. Default: False.\n K_cbn : int\n Number of flows to use in the nonlinear nonparametric base distribution. Default: 3.\n net_hidden_dim_cbn : int\n Hidden dimension of the neural network used in the nonlinear nonparametric base distribution. Default: 128.\n net_hidden_layers_cbn : int\n Number of hidden layers of the neural network used in the nonlinear nonparametric base distribution. 
Default: 3.\n \"\"\"\n\n def __init__(\n self,\n latent_dim: int,\n adjacency_matrix: np.ndarray,\n intervention_targets_per_env: Tensor,\n lr: float = 1e-2,\n weight_decay: float = 0,\n lr_scheduler: Optional[str] = None,\n lr_min: float = 0.0,\n adjacency_misspecified: bool = False,\n k_flows: int = 1,\n net_hidden_dim: int = 128,\n net_hidden_layers: int = 3,\n fix_mechanisms: bool = True,\n fix_all_intervention_targets: bool = False,\n nonparametric_base_distr: bool = False,\n K_cbn: int = 3,\n net_hidden_dim_cbn: int = 128,\n net_hidden_layers_cbn: int = 3,\n ) -> None:\n super().__init__(\n latent_dim=latent_dim,\n adjacency_matrix=adjacency_matrix,\n lr=lr,\n weight_decay=weight_decay,\n lr_scheduler=lr_scheduler,\n lr_min=lr_min,\n adjacency_misspecified=adjacency_misspecified,\n )\n self.encoder = NonlinearCauCAEncoder(\n latent_dim,\n self.adjacency_matrix, # this is the misspecified adjacency matrix if adjacency_misspecified=True\n K=k_flows,\n intervention_targets_per_env=intervention_targets_per_env,\n net_hidden_dim=net_hidden_dim,\n net_hidden_layers=net_hidden_layers,\n fix_mechanisms=fix_mechanisms,\n fix_all_intervention_targets=fix_all_intervention_targets,\n nonparametric_base_distr=nonparametric_base_distr,\n K_cbn=K_cbn,\n net_hidden_dim_cbn=net_hidden_dim_cbn,\n net_hidden_layers_cbn=net_hidden_layers_cbn,\n )\n self.save_hyperparameters()" } ]
import argparse import os import pytorch_lightning as pl import torch from pathlib import Path from pytorch_lightning.loggers import WandbLogger from config import DGP from data_generator import MultiEnvDataModule, make_multi_env_dgp from model.cauca_model import LinearCauCAModel, NaiveNonlinearModel, NonlinearCauCAModel
12382
) parser.add_argument( "--nonparametric-base-distr", type=bool, default=False, action=argparse.BooleanOptionalAction, help="Use nonparametric base distribution for flows.", ) parser.add_argument( "--wandb", type=bool, default=True, action=argparse.BooleanOptionalAction, help="Whether to log to weights and biases.", ) parser.add_argument( "--wandb-project", type=str, default="nonparam-ident", help="Weights & Biases project name.", ) args = parser.parse_args() if args.wandb: wandb_logger = WandbLogger(project=args.wandb_project) wandb_logger.experiment.config.update(args, allow_val_change=True) checkpoint_dir = ( Path(args.checkpoint_root_dir) / f"{wandb_logger.experiment.id}" ) logger = [wandb_logger] else: checkpoint_dir = Path(args.checkpoint_root_dir) / "default" logger = None checkpoint_callback = pl.callbacks.ModelCheckpoint( dirpath=checkpoint_dir, save_last=True, every_n_epochs=args.check_val_every_n_epoch, ) multi_env_dgp = make_multi_env_dgp( latent_dim=DGP[args.dgp]["num_causal_variables"], observation_dim=DGP[args.dgp]["observation_dim"], adjacency_matrix=DGP[args.dgp]["adj_matrix"], intervention_targets_per_env=DGP[args.dgp]["int_targets"], noise_shift_type=args.noise_shift_type, mixing=args.mixing, scm=args.scm, n_nonlinearities=args.n_nonlinearities, scm_coeffs_low=args.scm_coeffs_low, scm_coeffs_high=args.scm_coeffs_high, coeffs_min_abs_value=args.scm_coeffs_min_abs_value, edge_prob=DGP[args.dgp].get("edge_prob", None), snr=args.snr, ) data_module = MultiEnvDataModule( multi_env_dgp=multi_env_dgp, num_samples_per_env=DGP[args.dgp]["num_samples_per_env"], batch_size=args.batch_size, num_workers=os.cpu_count(), intervention_targets_per_env=DGP[args.dgp]["int_targets"], log_dir=checkpoint_dir / "data_stats", intervention_target_misspec=args.intervention_target_misspec, intervention_target_perm=args.intervention_target_perm, ) data_module.setup() pl.seed_everything(args.training_seed, workers=True) if args.intervention_target_misspec: # remember old intervention targets old_intervention_targets_per_env = DGP[args.dgp]["int_targets"] intervention_targets_per_env = torch.zeros_like( old_intervention_targets_per_env ) # get target permutation from data module perm = data_module.intervention_target_perm # permute intervention targets for env_idx in range(intervention_targets_per_env.shape[0]): for i in range(intervention_targets_per_env.shape[1]): if old_intervention_targets_per_env[env_idx, i] == 1: intervention_targets_per_env[env_idx, perm[i]] = 1 else: intervention_targets_per_env = DGP[args.dgp]["int_targets"] # Model Initialization if args.model == "nonlinear": model = NonlinearCauCAModel( latent_dim=DGP[args.dgp]["num_causal_variables"], adjacency_matrix=data_module.medgp.adjacency_matrix, k_flows=args.k_flows, lr=args.lr, intervention_targets_per_env=intervention_targets_per_env, lr_scheduler=args.lr_scheduler, lr_min=args.lr_min, adjacency_misspecified=args.adjacency_misspec, net_hidden_dim=args.net_hidden_dim, net_hidden_layers=args.net_hidden_layers, fix_mechanisms=args.fix_mechanisms, fix_all_intervention_targets=args.fix_all_intervention_targets, nonparametric_base_distr=args.nonparametric_base_distr, K_cbn=args.k_flows_cbn, net_hidden_dim_cbn=args.net_hidden_dim_cbn, net_hidden_layers_cbn=args.net_hidden_layers_cbn, ) elif args.model == "linear": model = LinearCauCAModel( latent_dim=DGP[args.dgp]["num_causal_variables"], adjacency_matrix=data_module.medgp.adjacency_matrix, lr=args.lr, intervention_targets_per_env=intervention_targets_per_env, 
lr_scheduler=args.lr_scheduler, lr_min=args.lr_min, adjacency_misspecified=args.adjacency_misspec, fix_mechanisms=args.fix_mechanisms, nonparametric_base_distr=args.nonparametric_base_distr, ) elif args.model == "naive":
def int_list(arg): try: int_list = int(arg) return int_list except ValueError: raise argparse.ArgumentTypeError("Invalid integer list format") if __name__ == "__main__": parser = argparse.ArgumentParser( description="Run experiment for Nonparametric Identifiability of Causal Representations from Unknown " "Interventions." ) parser.add_argument( "--max-epochs", type=int, default=10, help="Number of epochs to train for.", ) parser.add_argument( "--accelerator", type=str, default="gpu", help="Accelerator to use for training.", ) parser.add_argument( "--batch-size", type=int, default=1024, help="Number of samples per batch.", ) parser.add_argument( "--lr", type=float, default=1e-4, help="Learning rate for Adam optimizer.", ) parser.add_argument( "--checkpoint-root-dir", type=str, default="checkpoints", help="Checkpoint root directory.", ) parser.add_argument( "--noise-shift-type", type=str, default="mean", choices=["mean", "std"], help="Property of noise distribution that is shifted between environments.", ) parser.add_argument( "--check-val-every-n-epoch", type=int, default=1, help="Check validation loss every n epochs.", ) parser.add_argument( "--dgp", type=str, default="graph-4-0", help="Data generation process to use.", ) parser.add_argument( "--k-flows", type=int, default=1, help="Number of flows to use in nonlinear ICA model.", ) parser.add_argument( "--k-flows-cbn", type=int, default=3, help="Number of flows to use in nonlinear latent CBN model.", ) parser.add_argument( "--model", type=str, default="nonlinear", help="Type of encoder to use.", choices=["linear", "nonlinear", "naive"], ) parser.add_argument( "--seed", type=int, default=42, ) parser.add_argument( "--training-seed", type=int, default=42, ) parser.add_argument( "--mixing", type=str, default="nonlinear", help="Type of mixing function to use.", choices=["linear", "nonlinear"], ) parser.add_argument( "--scm", type=str, default="linear", help="Type of SCM to use.", choices=["linear", "location-scale"], ) parser.add_argument( "--n-nonlinearities", type=int, default=1, help="Number of nonlinearities to use in nonlinear mixing function.", ) parser.add_argument( "--learn-scm-params", type=bool, default=True, action=argparse.BooleanOptionalAction, help="Whether to learn SCM parameters.", ) parser.add_argument( "--lr-scheduler", type=str, default=None, help="Learning rate scheduler.", choices=[None, "cosine"], ) parser.add_argument( "--lr-min", type=float, default=0.0, help="Minimum learning rate for cosine learning rate scheduler.", ) parser.add_argument( "--scm-coeffs-low", type=float, default=-1, help="Lower bound for SCM coefficients.", ) parser.add_argument( "--scm-coeffs-high", type=float, default=1, help="Upper bound for SCM coefficients.", ) parser.add_argument( "--scm-coeffs-min-abs-value", type=float, default=None, help="Minimum absolute value for SCM coefficients.", ) parser.add_argument( "--snr", type=float, default=1.0, help="Signal-to-noise ratio in latent SCM.", ) parser.add_argument( "--adjacency-misspec", type=bool, default=False, action=argparse.BooleanOptionalAction, help="Misspecify adjacency matrix - assume ICA.", ) parser.add_argument( "--intervention-target-misspec", type=bool, default=False, action=argparse.BooleanOptionalAction, help="Misspecify intervention target - mix up labels and true intervention targets.", ) parser.add_argument( "--intervention-target-perm", nargs="+", # Allows multiple arguments to be passed as a list default=None, type=int_list, help="Permutation of intervention targets. 
Only used if intervention-target-misspec is True.", ) parser.add_argument( "--net-hidden-layers", type=int, default=3, help="Number of hidden layers in nonlinear encoder.", ) parser.add_argument( "--net-hidden-layers-cbn", type=int, default=3, help="Number of hidden layers in latent CBN model.", ) parser.add_argument( "--net-hidden-dim", type=int, default=128, help="Number of hidden dimensions in nonlinear encoder.", ) parser.add_argument( "--net-hidden-dim-cbn", type=int, default=128, help="Number of hidden dimensions in latent CBN model.", ) parser.add_argument( "--fix-mechanisms", type=bool, default=True, action=argparse.BooleanOptionalAction, help="Fix fixable mechanisms in latents.", ) parser.add_argument( "--fix-all-intervention-targets", type=bool, default=False, action=argparse.BooleanOptionalAction, help="Fix all intervention targets.", ) parser.add_argument( "--nonparametric-base-distr", type=bool, default=False, action=argparse.BooleanOptionalAction, help="Use nonparametric base distribution for flows.", ) parser.add_argument( "--wandb", type=bool, default=True, action=argparse.BooleanOptionalAction, help="Whether to log to weights and biases.", ) parser.add_argument( "--wandb-project", type=str, default="nonparam-ident", help="Weights & Biases project name.", ) args = parser.parse_args() if args.wandb: wandb_logger = WandbLogger(project=args.wandb_project) wandb_logger.experiment.config.update(args, allow_val_change=True) checkpoint_dir = ( Path(args.checkpoint_root_dir) / f"{wandb_logger.experiment.id}" ) logger = [wandb_logger] else: checkpoint_dir = Path(args.checkpoint_root_dir) / "default" logger = None checkpoint_callback = pl.callbacks.ModelCheckpoint( dirpath=checkpoint_dir, save_last=True, every_n_epochs=args.check_val_every_n_epoch, ) multi_env_dgp = make_multi_env_dgp( latent_dim=DGP[args.dgp]["num_causal_variables"], observation_dim=DGP[args.dgp]["observation_dim"], adjacency_matrix=DGP[args.dgp]["adj_matrix"], intervention_targets_per_env=DGP[args.dgp]["int_targets"], noise_shift_type=args.noise_shift_type, mixing=args.mixing, scm=args.scm, n_nonlinearities=args.n_nonlinearities, scm_coeffs_low=args.scm_coeffs_low, scm_coeffs_high=args.scm_coeffs_high, coeffs_min_abs_value=args.scm_coeffs_min_abs_value, edge_prob=DGP[args.dgp].get("edge_prob", None), snr=args.snr, ) data_module = MultiEnvDataModule( multi_env_dgp=multi_env_dgp, num_samples_per_env=DGP[args.dgp]["num_samples_per_env"], batch_size=args.batch_size, num_workers=os.cpu_count(), intervention_targets_per_env=DGP[args.dgp]["int_targets"], log_dir=checkpoint_dir / "data_stats", intervention_target_misspec=args.intervention_target_misspec, intervention_target_perm=args.intervention_target_perm, ) data_module.setup() pl.seed_everything(args.training_seed, workers=True) if args.intervention_target_misspec: # remember old intervention targets old_intervention_targets_per_env = DGP[args.dgp]["int_targets"] intervention_targets_per_env = torch.zeros_like( old_intervention_targets_per_env ) # get target permutation from data module perm = data_module.intervention_target_perm # permute intervention targets for env_idx in range(intervention_targets_per_env.shape[0]): for i in range(intervention_targets_per_env.shape[1]): if old_intervention_targets_per_env[env_idx, i] == 1: intervention_targets_per_env[env_idx, perm[i]] = 1 else: intervention_targets_per_env = DGP[args.dgp]["int_targets"] # Model Initialization if args.model == "nonlinear": model = NonlinearCauCAModel( 
latent_dim=DGP[args.dgp]["num_causal_variables"], adjacency_matrix=data_module.medgp.adjacency_matrix, k_flows=args.k_flows, lr=args.lr, intervention_targets_per_env=intervention_targets_per_env, lr_scheduler=args.lr_scheduler, lr_min=args.lr_min, adjacency_misspecified=args.adjacency_misspec, net_hidden_dim=args.net_hidden_dim, net_hidden_layers=args.net_hidden_layers, fix_mechanisms=args.fix_mechanisms, fix_all_intervention_targets=args.fix_all_intervention_targets, nonparametric_base_distr=args.nonparametric_base_distr, K_cbn=args.k_flows_cbn, net_hidden_dim_cbn=args.net_hidden_dim_cbn, net_hidden_layers_cbn=args.net_hidden_layers_cbn, ) elif args.model == "linear": model = LinearCauCAModel( latent_dim=DGP[args.dgp]["num_causal_variables"], adjacency_matrix=data_module.medgp.adjacency_matrix, lr=args.lr, intervention_targets_per_env=intervention_targets_per_env, lr_scheduler=args.lr_scheduler, lr_min=args.lr_min, adjacency_misspecified=args.adjacency_misspec, fix_mechanisms=args.fix_mechanisms, nonparametric_base_distr=args.nonparametric_base_distr, ) elif args.model == "naive":
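The argument parser in the record above declares its boolean switches with argparse.BooleanOptionalAction, which auto-generates a paired --no-<flag> option for each one. A small self-contained sketch of that standard-library behaviour (Python 3.9+); the two flag names are reused from the script purely for illustration:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--wandb", default=True, action=argparse.BooleanOptionalAction)
parser.add_argument("--adjacency-misspec", default=False, action=argparse.BooleanOptionalAction)

print(parser.parse_args([]))                       # Namespace(wandb=True, adjacency_misspec=False)
print(parser.parse_args(["--no-wandb"]))           # wandb=False
print(parser.parse_args(["--adjacency-misspec"]))  # adjacency_misspec=True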
model = NaiveNonlinearModel(
4
2023-10-25 09:25:26+00:00
16k
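Both MultiEnvDataModule.setup (in the context entries above) and the intervention_target_misspec branch of main.py remap the one-hot intervention targets through a permutation with a nested loop over environments and variables. A minimal sketch of that remapping; permute_intervention_targets is a hypothetical helper name and the permutation below is a toy value rather than one drawn by the data module:

import torch

def permute_intervention_targets(targets: torch.Tensor, perm: list[int]) -> torch.Tensor:
    """Move each one-hot intervention target from column i to column perm[i]."""
    permuted = torch.zeros_like(targets)
    for env_idx in range(targets.shape[0]):
        for i in range(targets.shape[1]):
            if targets[env_idx, i] == 1:
                permuted[env_idx, perm[i]] = 1
    return permuted

# Observational environment plus one single-target intervention per variable,
# matching the int_targets layout of DGP["graph-4-0"].
targets = torch.tensor(
    [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]
)
perm = [1, 0, 3, 2]  # toy permutation
print(permute_intervention_targets(targets, perm))
# Environment 1 now claims variable 1 was intervened on, environment 2 claims variable 0, etc.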
endo-yuki-t/MAG
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates\n else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def reset_num_updates(self):\n del self.num_updates\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 
1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ema_decay=None,\n learn_logvar=False\n ):\n super().__init__()\n self.learn_logvar = learn_logvar\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n self.use_ema = ema_decay is not None\n if self.use_ema:\n self.ema_decay = ema_decay\n assert 0. 
< ema_decay < 1.\n self.model_ema = LitEma(self, decay=ema_decay)\n print(f\"Keeping EMAs of {len(list(self.model_ema.buffers()))}.\")\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n @contextmanager\n def ema_scope(self, context=None):\n if self.use_ema:\n self.model_ema.store(self.parameters())\n self.model_ema.copy_to(self)\n if context is not None:\n print(f\"{context}: Switched to EMA weights\")\n try:\n yield None\n finally:\n if self.use_ema:\n self.model_ema.restore(self.parameters())\n if context is not None:\n print(f\"{context}: Restored training weights\")\n\n def on_train_batch_end(self, *args, **kwargs):\n if self.use_ema:\n self.model_ema(self)\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n log_dict = self._validation_step(batch, batch_idx)\n with self.ema_scope():\n log_dict_ema = self._validation_step(batch, batch_idx, postfix=\"_ema\")\n return log_dict\n\n def _validation_step(self, batch, batch_idx, postfix=\"\"):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n self.log(f\"val{postfix}/rec_loss\", log_dict_ae[f\"val{postfix}/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def 
configure_optimizers(self):\n lr = self.learning_rate\n ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(\n self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())\n if self.learn_logvar:\n print(f\"{self.__class__.__name__}: Learning logvar\")\n ae_params_list.append(self.loss.logvar)\n opt_ae = torch.optim.Adam(ae_params_list,\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n if log_ema or self.use_ema:\n with self.ema_scope():\n xrec_ema, posterior_ema = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec_ema.shape[1] > 3\n xrec_ema = self.to_rgb(xrec_ema)\n log[\"samples_ema\"] = self.decode(torch.randn_like(posterior_ema.sample()))\n log[\"reconstructions_ema\"] = xrec_ema\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n 
self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise 
NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas)\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, 
device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec\n \n @torch.no_grad()\n def forward_ddim(self, x_latent, cond, total_step=10, t0=1000, return_noised_maps=False):\n seq_inv = np.linspace(0, 1, total_step) * (t0-1)\n seq_inv = [int(s) for s in list(seq_inv)]\n seq_inv_next = [-1] + list(seq_inv[:-1])\n x_enc = x_latent.clone()\n x_encs = []\n with tqdm(total=len(seq_inv), desc=f\"Inversion process \", ascii=True) as progress_bar:\n for it, (i, j) in enumerate(zip((seq_inv_next[1:]), (seq_inv[1:]))):\n if return_noised_maps:\n x_encs.append(x_enc)\n t = torch.full((x_latent.shape[0],), i, device=x_latent.device, dtype=torch.long)\n t_prev = torch.full((x_latent.shape[0],), j, device=x_latent.device, dtype=torch.long)\n x_enc, _ = denoising_step(x_enc, c=cond, t=t, t_next=t_prev, model=self.model, b=self.model.betas, eta=0)\n progress_bar.update(1)\n \n if return_noised_maps:\n return x_enc, x_encs\n \n return x_enc\n \n @torch.no_grad()\n def reverse_ddim(self, x_latent, cond, total_step=10, t0=1000, eta=0, unconditional_guidance_scale=1.0, unconditional_conditioning=None, noised_maps=False, mask=False, merge_stop_th=10):\n seq_test = np.linspace(0, 1, total_step) * (t0-1)\n seq_test = [int(s) for s in list(seq_test)]\n seq_test_next = [-1] + list(seq_test[:-1])\n x_dec = x_latent.clone()\n step=len(seq_test)-1\n with tqdm(total=len(seq_test), desc=\"Generative process\", ascii=True) as progress_bar:\n for i, j in zip(reversed(seq_test[1:]), reversed(seq_test_next[1:])):\n t = torch.full((x_latent.shape[0],), i, device=x_latent.device, dtype=torch.long)\n t_next = torch.full((x_latent.shape[0],), j, device=x_latent.device, dtype=torch.long)\n x_dec, x_0 = denoising_step(x_dec, c=cond, t=t, t_next=t_next, model=self.model, b=self.model.betas, \n eta=eta, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning) \n if noised_maps is not False:\n step -= 1\n if step>merge_stop_th:\n x_dec = mask*x_dec+(1.-mask)*noised_maps[step]\n progress_bar.update(1)\n return x_dec\n \n def attention_guided_reverse_ddim(self, x_latent, cond, total_step=50, t0=1000, eta=0, unconditional_guidance_scale=1.0, unconditional_conditioning=None, att_masks=None, word_ids_for_mask=None, alpha=0.08, lmbd=0.5, swapping_step_th=float('inf'), guidance_step_th=float('inf')):\n seq_test = np.linspace(0, 1, total_step) * (t0-1)\n seq_test = [int(s) for s in list(seq_test)]\n seq_test_next = [-1] + list(seq_test[:-1])\n step=len(seq_test)-1\n optimized_latent = x_latent.clone().detach()\n \n with tqdm(total=len(seq_test), desc=\"Generative process\", ascii=True) as progress_bar:\n for i, j in zip(reversed(seq_test[1:]), reversed(seq_test_next[1:])):\n t = torch.full((x_latent.shape[0],), i, device=x_latent.device, dtype=torch.long)\n t_next = torch.full((x_latent.shape[0],), j, device=x_latent.device, dtype=torch.long)\n \n if t[0]>guidance_step_th:\n optimized_latent.requires_grad = True\n opt = torch.optim.SGD([optimized_latent], lr=alpha)\n _ = self.model.apply_model(optimized_latent, t.detach(), cond.detach())\n \n loss = 0.\n for name, module in self.model.named_modules():\n module_name = type(module).__name__\n if module_name == \"MemoryEfficientCrossAttention\" and 'attn2' in name:\n att = module.stored_attention\n 
w = int(math.sqrt(att.shape[1]))\n for amid, att_mask in enumerate(att_masks):\n if amid >= len(word_ids_for_mask):\n continue\n att_mask = att_mask.detach()\n att_mask_resized = torch.nn.functional.interpolate(att_mask, size=(w,w)).to(torch.bool)\n att_mask_ = rearrange(att_mask_resized, 'b ... -> b (...)')\n att_mask_ = repeat(att_mask_, 'b j -> (b h) j', h=module.heads)\n \n word_ids = word_ids_for_mask[amid]\n loss += -att[:,:,word_ids][att_mask_==1].sum()\n loss += lmbd*att[:,:,word_ids][att_mask_==0].sum()\n \n #print(\"Masked attention loss:\", loss)\n loss.backward(retain_graph=False)\n opt.step()\n \n with torch.no_grad():\n if t[0]>swapping_step_th:\n att_masks_att_ids = [att_masks,word_ids_for_mask]\n else:\n att_masks_att_ids = None\n x_dec, x_0 = denoising_step(optimized_latent, c=cond, t=t, t_next=t_next, model=self.model, b=self.model.betas, \n eta=eta, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning,\n att_mask=att_masks_att_ids)\n \n optimized_latent = x_dec.detach().clone() \n step -= 1\n progress_bar.update(1)\n \n return optimized_latent" } ]
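The context list above ends here. As a minimal standalone sketch (not part of either dataset row; the tensor shapes and the 1e-6 tolerance are arbitrary assumptions), the closed-form Gaussian KL reproduced in the `normal_kl` snippet can be checked against `torch.distributions`:

```python
# Standalone check of the closed-form Gaussian KL used by the normal_kl snippet above.
# Assumption: plain float32 tensors; the tolerance is an arbitrary choice.
import torch
from torch.distributions import Normal, kl_divergence

def normal_kl(mean1, logvar1, mean2, logvar2):
    # Same expression as in the snippet: KL( N(mean1, exp(logvar1)) || N(mean2, exp(logvar2)) )
    return 0.5 * (
        -1.0
        + logvar2
        - logvar1
        + torch.exp(logvar1 - logvar2)
        + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)
    )

torch.manual_seed(0)
mean1, logvar1 = torch.randn(8), torch.randn(8)
mean2, logvar2 = torch.randn(8), torch.randn(8)

closed_form = normal_kl(mean1, logvar1, mean2, logvar2)
reference = kl_divergence(
    Normal(mean1, torch.exp(0.5 * logvar1)),  # std = exp(logvar / 2)
    Normal(mean2, torch.exp(0.5 * logvar2)),
)
print(torch.allclose(closed_form, reference, atol=1e-6))  # True
```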
import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager, nullcontext from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from omegaconf import ListConfig from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from ldm.models.diffusion.ddim import DDIMSampler
11,310
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit
if reset_ema: assert exists(ckpt_path)
1
2023-10-27 06:56:37+00:00
16k
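Before the next row, a minimal standalone sketch (hyperparameters mirror the snippet defaults; batch and latent shapes are arbitrary assumptions) of how the "linear" schedule from `make_beta_schedule` and the gather in `extract_into_tensor` combine into the noising step that `DDIMSampler.stochastic_encode` applies:

```python
# Standalone illustration of the buffers built in the snippets above:
# the "linear" beta schedule (linspace in sqrt space, then squared) and the
# gather-and-broadcast pattern of extract_into_tensor, combined the way
# DDIMSampler.stochastic_encode forms q(x_t | x_0).
import torch

n_timestep, linear_start, linear_end = 1000, 1e-4, 2e-2  # defaults from the snippet

betas = torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep,
                       dtype=torch.float64) ** 2
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
sqrt_alphas_cumprod = alphas_cumprod.sqrt()
sqrt_one_minus_alphas_cumprod = (1.0 - alphas_cumprod).sqrt()

def extract_into_tensor(a, t, x_shape):
    # Pick one scalar per batch element and reshape to (b, 1, 1, 1) so it
    # broadcasts against an image-shaped tensor.
    b, *_ = t.shape
    out = a.gather(-1, t)
    return out.reshape(b, *((1,) * (len(x_shape) - 1)))

x0 = torch.randn(4, 3, 8, 8, dtype=torch.float64)  # arbitrary stand-in for latents
t = torch.randint(0, n_timestep, (4,))
noise = torch.randn_like(x0)

x_t = (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0
       + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)
print(x_t.shape)  # torch.Size([4, 3, 8, 8])
```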
Gene-Weaver/VoucherVision
vouchervision/VoucherVision_GUI.py
[ { "identifier": "write_config_file", "path": "vouchervision/LeafMachine2_Config_Builder.py", "snippet": "def write_config_file(config_data, dir_home, filename=\"LeafMachine2.yaml\"):\n file_path = os.path.join(dir_home, filename)\n\n # Write the data to a YAML file\n with open(file_path, \"w\") as outfile:\n yaml.dump(config_data, outfile, default_flow_style=False)" }, { "identifier": "build_VV_config", "path": "vouchervision/VoucherVision_Config_Builder.py", "snippet": "def build_VV_config():\n #############################################\n ############ Set common defaults ############\n #############################################\n # Changing the values below will set new \n # default values each time you open the \n # VoucherVision user interface\n #############################################\n #############################################\n #############################################\n\n dir_home = os.path.dirname(os.path.dirname(__file__))\n run_name = 'test'\n # dir_images_local = 'D:/Dropbox/LM2_Env/Image_Datasets/GBIF_BroadSample_3SppPerFamily1'\n dir_images_local = os.path.join(dir_home,'demo','demo_images')\n \n # The default output location is the computer's \"Downloads\" folder\n # You can set dir_output directly by typing the folder path,\n # OR you can uncomment the line \"dir_output = default_output_folder\" \n # to have VoucherVision save to the Downloads folder by default\n default_output_folder = get_default_download_folder()\n dir_output = default_output_folder\n # dir_output = 'D:/D_Desktop/LM2'\n\n prefix_removal = '' #'MICH-V-'\n suffix_removal = ''\n catalog_numerical_only = False\n\n LLM_version_user = 'Azure GPT 4'\n prompt_version = 'Version 2' # from [\"Version 1\", \"Version 1 No Domain Knowledge\", \"Version 2\"]\n use_LeafMachine2_collage_images = False # Use LeafMachine2 collage images\n do_create_OCR_helper_image = False\n\n batch_size = 500\n\n path_domain_knowledge = os.path.join(dir_home,'domain_knowledge','SLTP_UM_AllAsiaMinimalInRegion.xlsx')\n embeddings_database_name = os.path.splitext(os.path.basename(path_domain_knowledge))[0]\n\n #############################################\n #############################################\n ########## DO NOT EDIT BELOW HERE ###########\n #############################################\n #############################################\n return assemble_config(dir_home, run_name, dir_images_local,dir_output,\n prefix_removal,suffix_removal,catalog_numerical_only,LLM_version_user,batch_size,\n path_domain_knowledge,embeddings_database_name,use_LeafMachine2_collage_images,\n prompt_version, do_create_OCR_helper_image, use_domain_knowledge=False)" }, { "identifier": "run_demo_tests_GPT", "path": "vouchervision/VoucherVision_Config_Builder.py", "snippet": "def run_demo_tests_GPT(progress_report):\n dir_home, path_to_configs, test_results = build_demo_tests('gpt')\n progress_report.set_n_overall(len(test_results.items()))\n\n JSON_results = {}\n\n for ind, (cfg, result) in enumerate(test_results.items()):\n OPT1, OPT2, OPT3 = TestOptionsGPT.get_options()\n \n test_ind, ind_opt1, ind_opt2, ind_opt3 = cfg.split('__')\n opt1_readable = OPT1[int(ind_opt1.split('-')[1])]\n\n if opt1_readable in [\"Azure GPT 4\", \"Azure GPT 3.5\"]:\n api_version = 'gpt-azure'\n elif opt1_readable in [\"GPT 4\", \"GPT 3.5\"]:\n api_version = 'gpt'\n else:\n raise\n\n opt2_readable = \"Use LeafMachine2 for Collage Images\" if OPT2[int(ind_opt2.split('-')[1])] else \"Don't use LeafMachine2 for Collage Images\"\n opt3_readable = f\"Prompt 
{OPT3[int(ind_opt3.split('-')[1])]}\"\n # Construct the human-readable test name\n human_readable_name = f\"{opt1_readable}, {opt2_readable}, {opt3_readable}\"\n get_n_overall = progress_report.get_n_overall()\n progress_report.update_overall(f\"Test {int(test_ind)+1} of {get_n_overall} --- Validating {human_readable_name}\")\n print_main_fail(f\"Starting validation test: {human_readable_name}\")\n cfg_file_path = os.path.join(path_to_configs,'.'.join([cfg,'yaml']))\n \n if check_API_key(dir_home, api_version) and check_API_key(dir_home, 'google-vision-ocr'):\n try:\n last_JSON_response, total_cost = voucher_vision(cfg_file_path, dir_home, cfg_test=None, progress_report=progress_report, test_ind=int(test_ind))\n test_results[cfg] = True\n JSON_results[ind] = last_JSON_response\n except Exception as e:\n JSON_results[ind] = None\n test_results[cfg] = False\n print(f\"An exception occurred: {e}\")\n traceback.print_exc() # This will print the full traceback\n else:\n fail_response = ''\n if not check_API_key(dir_home, 'google-vision-ocr'):\n fail_response += \"No API key found for Google Vision OCR\"\n if not check_API_key(dir_home, api_version):\n fail_response += f\" + No API key found for {api_version}\"\n test_results[cfg] = False\n JSON_results[ind] = fail_response\n print(f\"No API key found for {fail_response}\")\n \n return test_results, JSON_results" }, { "identifier": "run_demo_tests_Palm", "path": "vouchervision/VoucherVision_Config_Builder.py", "snippet": "def run_demo_tests_Palm(progress_report):\n api_version = 'palm'\n\n dir_home, path_to_configs, test_results = build_demo_tests('palm')\n progress_report.set_n_overall(len(test_results.items()))\n\n JSON_results = {}\n\n for ind, (cfg, result) in enumerate(test_results.items()):\n OPT1, OPT2, OPT3 = TestOptionsPalm.get_options()\n test_ind, ind_opt1, ind_opt2, ind_opt3 = cfg.split('__')\n opt1_readable = OPT1[int(ind_opt1.split('-')[1])]\n opt2_readable = \"Use LeafMachine2 for Collage Images\" if OPT2[int(ind_opt2.split('-')[1])] else \"Don't use LeafMachine2 for Collage Images\"\n opt3_readable = f\"Prompt {OPT3[int(ind_opt3.split('-')[1])]}\"\n # opt3_readable = \"Use Domain Knowledge\" if OPT3[int(ind_opt3.split('-')[1])] else \"Don't use Domain Knowledge\"\n # Construct the human-readable test name\n human_readable_name = f\"{opt1_readable}, {opt2_readable}, {opt3_readable}\"\n get_n_overall = progress_report.get_n_overall()\n progress_report.update_overall(f\"Test {int(test_ind)+1} of {get_n_overall} --- Validating {human_readable_name}\")\n print_main_fail(f\"Starting validation test: {human_readable_name}\")\n cfg_file_path = os.path.join(path_to_configs,'.'.join([cfg,'yaml']))\n \n if check_API_key(dir_home, api_version) and check_API_key(dir_home, 'google-vision-ocr') :\n try:\n last_JSON_response, total_cost = voucher_vision(cfg_file_path, dir_home, cfg_test=None, path_custom_prompts=None, progress_report=progress_report, test_ind=int(test_ind))\n test_results[cfg] = True\n JSON_results[ind] = last_JSON_response\n except Exception as e:\n test_results[cfg] = False\n JSON_results[ind] = None\n print(f\"An exception occurred: {e}\")\n traceback.print_exc() # This will print the full traceback\n else:\n fail_response = ''\n if not check_API_key(dir_home, 'google-vision-ocr'):\n fail_response += \"No API key found for Google Vision OCR\"\n if not check_API_key(dir_home, api_version):\n fail_response += f\" + No API key found for {api_version}\"\n test_results[cfg] = False\n JSON_results[ind] = fail_response\n print(f\"No 
API key found for {fail_response}\")\n\n return test_results, JSON_results" }, { "identifier": "TestOptionsGPT", "path": "vouchervision/VoucherVision_Config_Builder.py", "snippet": "class TestOptionsGPT:\n OPT1 = [\"gpt-4-1106-preview\",\"GPT 4\", \"GPT 3.5\", \"Azure GPT 4\", \"Azure GPT 3.5\"]\n OPT2 = [False, True]\n OPT3 = [\"Version 1\", \"Version 1 No Domain Knowledge\", \"Version 2\"]\n\n @classmethod\n def get_options(cls):\n return cls.OPT1, cls.OPT2, cls.OPT3\n @classmethod\n def get_length(cls):\n return 24" }, { "identifier": "TestOptionsPalm", "path": "vouchervision/VoucherVision_Config_Builder.py", "snippet": "class TestOptionsPalm:\n OPT1 = [\"PaLM 2\"]\n OPT2 = [False, True]\n OPT3 = [\"Version 1 PaLM 2\", \"Version 1 PaLM 2 No Domain Knowledge\", \"Version 2 PaLM 2\"]\n\n @classmethod\n def get_options(cls):\n return cls.OPT1, cls.OPT2, cls.OPT3\n @classmethod\n def get_length(cls):\n return 6" }, { "identifier": "check_if_usable", "path": "vouchervision/VoucherVision_Config_Builder.py", "snippet": "def check_if_usable():\n dir_home = os.path.dirname(os.path.dirname(__file__))\n path_cfg_private = os.path.join(dir_home, 'PRIVATE_DATA.yaml')\n cfg_private = get_cfg_from_full_path(path_cfg_private)\n\n has_key_openai = has_API_key(cfg_private['openai']['OPENAI_API_KEY'])\n\n has_key_azure_openai = has_API_key(cfg_private['openai_azure']['api_version']) \n\n has_key_palm2 = has_API_key(cfg_private['google_palm']['google_palm_api'])\n \n has_key_google_OCR = has_API_key(cfg_private['google_cloud']['path_json_file'])\n\n if has_key_google_OCR and (has_key_azure_openai or has_key_openai or has_key_palm2):\n return True\n else:\n return False" }, { "identifier": "run_api_tests", "path": "vouchervision/VoucherVision_Config_Builder.py", "snippet": "def run_api_tests(api):\n try:\n dir_home, path_to_configs, test_results = build_api_tests(api)\n\n JSON_results = {}\n\n for ind, (cfg, result) in enumerate(test_results.items()):\n if api == 'openai':\n OPT1, OPT2, OPT3 = TestOptionsAPI_openai.get_options()\n elif 'azure_openai':\n OPT1, OPT2, OPT3 = TestOptionsAPI_azure_openai.get_options()\n elif 'palm':\n OPT1, OPT2, OPT3 = TestOptionsAPI_palm.get_options()\n test_ind, ind_opt1, ind_opt2, ind_opt3 = cfg.split('__')\n opt1_readable = OPT1[int(ind_opt1.split('-')[1])]\n opt2_readable = \"Use LeafMachine2 for Collage Images\" if OPT2[int(ind_opt2.split('-')[1])] else \"Don't use LeafMachine2 for Collage Images\"\n opt3_readable = f\"Prompt {OPT3[int(ind_opt3.split('-')[1])]}\"\n # opt3_readable = \"Use Domain Knowledge\" if OPT3[int(ind_opt3.split('-')[1])] else \"Don't use Domain Knowledge\"\n # Construct the human-readable test name\n human_readable_name = f\"{opt1_readable}, {opt2_readable}, {opt3_readable}\"\n print_main_fail(f\"Starting validation test: {human_readable_name}\")\n cfg_file_path = os.path.join(path_to_configs,'.'.join([cfg,'yaml']))\n \n if check_API_key(dir_home, api) and check_API_key(dir_home, 'google-vision-ocr') :\n try:\n last_JSON_response, total_cost = voucher_vision(cfg_file_path, dir_home, None,path_custom_prompts=None , cfg_test=None, progress_report=None, test_ind=int(test_ind))\n test_results[cfg] = True\n JSON_results[ind] = last_JSON_response\n return True\n\n except Exception as e:\n print(e)\n return False\n else:\n return False\n except Exception as e:\n print(e)\n return False" }, { "identifier": "voucher_vision", "path": "vouchervision/vouchervision_main.py", "snippet": "def voucher_vision(cfg_file_path, dir_home, path_custom_prompts, cfg_test, 
progress_report, path_api_cost=None, test_ind = None, is_real_run=False):\n # get_n_overall = progress_report.get_n_overall()\n # progress_report.update_overall(f\"Working on {test_ind+1} of {get_n_overall}\")\n\n t_overall = perf_counter()\n\n # Load config file\n report_config(dir_home, cfg_file_path, system='VoucherVision')\n\n if cfg_test is None:\n cfg = load_config_file(dir_home, cfg_file_path, system='VoucherVision') # For VoucherVision\n else:\n cfg = cfg_test \n # user_cfg = load_config_file(dir_home, cfg_file_path)\n # cfg = Config(user_cfg)\n\n # Check to see if there are subdirs\n # Yes --> use the names of the subsirs as run_name\n run_name, dirs_list, has_subdirs = check_for_subdirs_VV(cfg)\n print(f\"run_name {run_name} dirs_list{dirs_list} has_subdirs{has_subdirs}\")\n\n # for dir_ind, dir_in in enumerate(dirs_list):\n # if has_subdirs:\n # cfg['leafmachine']['project']['dir_images_local'] = dir_in\n # cfg['leafmachine']['project']['run_name'] = run_name[dir_ind]\n\n # Dir structure\n if is_real_run:\n progress_report.update_overall(f\"Creating Output Directory Structure\")\n print_main_start(\"Creating Directory Structure\")\n Dirs = Dir_Structure(cfg)\n\n # logging.info(\"Hi\")\n logger = start_logging(Dirs, cfg)\n\n # Check to see if required ML files are ready to use\n if is_real_run:\n progress_report.update_overall(f\"Fetching LeafMachine2 Files\")\n ready_to_use = fetch_data(logger, dir_home, cfg_file_path)\n assert ready_to_use, \"Required ML files are not ready to use!\\nThe download may have failed,\\nor\\nthe directory structure of LM2 has been altered\"\n\n # Wrangle images and preprocess\n print_main_start(\"Gathering Images and Image Metadata\")\n Project = Project_Info(cfg, logger, dir_home, Dirs) # Where file names are modified\n\n # Save config file\n save_config_file(cfg, logger, Dirs)\n\n # Detect Archival Components\n print_main_start(\"Locating Archival Components\")\n Project = detect_archival_components(cfg, logger, dir_home, Project, Dirs, is_real_run, progress_report)\n\n # Save cropped detections\n crop_detections_from_images_VV(cfg, logger, dir_home, Project, Dirs)\n\n # Process labels\n Voucher_Vision = VoucherVision(cfg, logger, dir_home, path_custom_prompts, Project, Dirs)\n n_images = len(Voucher_Vision.img_paths)\n last_JSON_response, total_tokens_in, total_tokens_out = Voucher_Vision.process_specimen_batch(progress_report, is_real_run)\n \n if path_api_cost:\n cost_summary, data, total_cost = save_token_info_as_csv(Dirs, cfg['leafmachine']['LLM_version'], path_api_cost, total_tokens_in, total_tokens_out, n_images)\n add_to_expense_report(dir_home, data)\n logger.info(cost_summary)\n else:\n total_cost = None #TODO add config tests to expense_report\n\n t_overall_s = perf_counter()\n logger.name = 'Run Complete! :)'\n logger.info(f\"[Total elapsed time] {round((t_overall_s - t_overall)/60)} minutes\")\n space_saver(cfg, Dirs, logger)\n\n if is_real_run:\n progress_report.update_overall(f\"Run Complete! 
:sunglasses:\")\n\n for handler in logger.handlers[:]:\n handler.close()\n logger.removeHandler(handler)\n\n return last_JSON_response, total_cost" }, { "identifier": "voucher_vision_OCR_test", "path": "vouchervision/vouchervision_main.py", "snippet": "def voucher_vision_OCR_test(cfg_file_path, dir_home, cfg_test, path_to_crop):\n # get_n_overall = progress_report.get_n_overall()\n # progress_report.update_overall(f\"Working on {test_ind+1} of {get_n_overall}\")\n\n # Load config file\n report_config(dir_home, cfg_file_path, system='VoucherVision')\n\n if cfg_test is None:\n cfg = load_config_file(dir_home, cfg_file_path, system='VoucherVision') # For VoucherVision\n else:\n cfg = cfg_test \n # user_cfg = load_config_file(dir_home, cfg_file_path)\n # cfg = Config(user_cfg)\n\n # Check to see if there are subdirs\n # Yes --> use the names of the subsirs as run_name\n run_name, dirs_list, has_subdirs = check_for_subdirs_VV(cfg)\n print(f\"run_name {run_name} dirs_list{dirs_list} has_subdirs{has_subdirs}\")\n\n # for dir_ind, dir_in in enumerate(dirs_list):\n # if has_subdirs:\n # cfg['leafmachine']['project']['dir_images_local'] = dir_in\n # cfg['leafmachine']['project']['run_name'] = run_name[dir_ind]\n\n # Dir structure\n print_main_start(\"Creating Directory Structure\")\n Dirs = Dir_Structure(cfg)\n\n # logging.info(\"Hi\")\n logger = start_logging(Dirs, cfg)\n\n # Check to see if required ML files are ready to use\n ready_to_use = fetch_data(logger, dir_home, cfg_file_path)\n assert ready_to_use, \"Required ML files are not ready to use!\\nThe download may have failed,\\nor\\nthe directory structure of LM2 has been altered\"\n\n # Wrangle images and preprocess\n print_main_start(\"Gathering Images and Image Metadata\")\n Project = Project_Info(cfg, logger, dir_home, Dirs) # Where file names are modified\n\n # Save config file\n save_config_file(cfg, logger, Dirs)\n\n # Detect Archival Components\n print_main_start(\"Locating Archival Components\")\n Project = detect_archival_components(cfg, logger, dir_home, Project, Dirs)\n\n # Save cropped detections\n crop_detections_from_images_VV(cfg, logger, dir_home, Project, Dirs)\n\n # Process labels\n Voucher_Vision = VoucherVision(cfg, logger, dir_home, None, Project, Dirs)\n last_JSON_response = Voucher_Vision.process_specimen_batch_OCR_test(path_to_crop)" }, { "identifier": "test_GPU", "path": "vouchervision/general_utils.py", "snippet": "def test_GPU():\n info = []\n success = False\n\n if torch.cuda.is_available():\n num_gpus = torch.cuda.device_count()\n info.append(f\"Number of GPUs: {num_gpus}\")\n\n for i in range(num_gpus):\n gpu = torch.cuda.get_device_properties(i)\n info.append(f\"GPU {i}: {gpu.name}\")\n\n success = True\n else:\n info.append(\"No GPU found!\")\n info.append(\"LeafMachine2 image cropping and embedding search will be slow or not possible.\")\n\n return success, info" }, { "identifier": "get_cfg_from_full_path", "path": "vouchervision/general_utils.py", "snippet": "def get_cfg_from_full_path(path_cfg):\n with open(path_cfg, \"r\") as ymlfile:\n cfg = yaml.full_load(ymlfile)\n return cfg" }, { "identifier": "summarize_expense_report", "path": "vouchervision/general_utils.py", "snippet": "def summarize_expense_report(path_expense_report):\n # Initialize counters and sums\n run_count = 0\n total_cost_sum = 0\n tokens_in_sum = 0\n tokens_out_sum = 0\n rate_in_sum = 0\n rate_out_sum = 0\n cost_in_sum = 0\n cost_out_sum = 0\n n_images_sum = 0\n api_version_counts = Counter()\n\n # Try to read the CSV file into a 
DataFrame\n try:\n df = pd.read_csv(path_expense_report)\n\n # Process each row in the DataFrame\n for index, row in df.iterrows():\n run_count += 1\n total_cost_sum += row['total_cost']\n tokens_in_sum += row['tokens_in']\n tokens_out_sum += row['tokens_out']\n rate_in_sum += row['rate_in']\n rate_out_sum += row['rate_out']\n cost_in_sum += row['cost_in']\n cost_out_sum += row['cost_out']\n n_images_sum += row['n_images']\n api_version_counts[row['api_version']] += 1\n\n except FileNotFoundError:\n print(f\"The file {path_expense_report} does not exist.\")\n return None\n\n # Calculate API version percentages\n api_version_percentages = {version: (count / run_count) * 100 for version, count in api_version_counts.items()}\n\n # Calculate cost per image for each API version\n cost_per_image_dict = {}\n for version, count in api_version_counts.items():\n total_cost = df[df['api_version'] == version]['total_cost'].sum()\n n_images = df[df['api_version'] == version]['n_images'].sum()\n cost_per_image = total_cost / n_images if n_images > 0 else 0\n cost_per_image_dict[version] = cost_per_image\n\n # Return the DataFrame and all summaries\n return {\n 'run_count': run_count,\n 'total_cost_sum': total_cost_sum,\n 'tokens_in_sum': tokens_in_sum,\n 'tokens_out_sum': tokens_out_sum,\n 'rate_in_sum': rate_in_sum,\n 'rate_out_sum': rate_out_sum,\n 'cost_in_sum': cost_in_sum,\n 'cost_out_sum': cost_out_sum,\n 'n_images_sum':n_images_sum,\n 'api_version_percentages': api_version_percentages,\n 'cost_per_image': cost_per_image_dict\n }, df" }, { "identifier": "create_google_ocr_yaml_config", "path": "vouchervision/general_utils.py", "snippet": "def create_google_ocr_yaml_config(output_file, dir_images_local, dir_output):\n # Define the configuration dictionary\n config = {\n 'leafmachine': {\n 'LLM_version': 'PaLM 2',\n 'archival_component_detector': {\n 'detector_iteration': 'PREP_final',\n 'detector_type': 'Archival_Detector',\n 'detector_version': 'PREP_final',\n 'detector_weights': 'best.pt',\n 'do_save_prediction_overlay_images': True,\n 'ignore_objects_for_overlay': [],\n 'minimum_confidence_threshold': 0.5\n },\n 'cropped_components': {\n 'binarize_labels': False,\n 'binarize_labels_skeletonize': False,\n 'do_save_cropped_annotations': True,\n 'save_cropped_annotations': ['label', 'barcode'],\n 'save_per_annotation_class': True,\n 'save_per_image': False\n },\n 'data': {\n 'do_apply_conversion_factor': False,\n 'include_darwin_core_data_from_combined_file': False,\n 'save_individual_csv_files_landmarks': False,\n 'save_individual_csv_files_measurements': False,\n 'save_individual_csv_files_rulers': False,\n 'save_individual_efd_files': False,\n 'save_json_measurements': False,\n 'save_json_rulers': False\n },\n 'do': {\n 'check_for_corrupt_images_make_vertical': True,\n 'check_for_illegal_filenames': False\n },\n 'logging': {\n 'log_level': None\n },\n 'modules': {\n 'specimen_crop': True\n },\n 'overlay': {\n 'alpha_transparency_archival': 0.3,\n 'alpha_transparency_plant': 0,\n 'alpha_transparency_seg_partial_leaf': 0.3,\n 'alpha_transparency_seg_whole_leaf': 0.4,\n 'ignore_archival_detections_classes': [],\n 'ignore_landmark_classes': [],\n 'ignore_plant_detections_classes': ['leaf_whole', 'specimen'],\n 'line_width_archival': 12,\n 'line_width_efd': 12,\n 'line_width_plant': 12,\n 'line_width_seg': 12,\n 'overlay_background_color': 'black',\n 'overlay_dpi': 300,\n 'save_overlay_to_jpgs': True,\n 'save_overlay_to_pdf': False,\n 'show_archival_detections': True,\n 'show_landmarks': True,\n 
'show_plant_detections': True,\n 'show_segmentations': True\n },\n 'print': {\n 'optional_warnings': True,\n 'verbose': True\n },\n 'project': {\n 'batch_size': 500,\n 'build_new_embeddings_database': False,\n 'catalog_numerical_only': False,\n 'continue_run_from_partial_xlsx': '',\n 'delete_all_temps': False,\n 'delete_temps_keep_VVE': False,\n 'dir_images_local': dir_images_local,\n 'dir_output': dir_output,\n 'embeddings_database_name': 'SLTP_UM_AllAsiaMinimalInRegion',\n 'image_location': 'local',\n 'num_workers': 1,\n 'path_to_domain_knowledge_xlsx': '',\n 'prefix_removal': '',\n 'prompt_version': 'Version 2 PaLM 2',\n 'run_name': 'google_vision_ocr_test',\n 'suffix_removal': '',\n 'use_domain_knowledge': False\n },\n 'use_RGB_label_images': False\n }\n }\n # Generate the YAML string from the data structure\n validate_dir(os.path.dirname(output_file))\n yaml_str = yaml.dump(config, sort_keys=False)\n\n # Write the YAML string to a file\n with open(output_file, 'w') as file:\n file.write(yaml_str)" }, { "identifier": "validate_dir", "path": "vouchervision/general_utils.py", "snippet": "def validate_dir(dir):\n if not os.path.exists(dir):\n os.makedirs(dir, exist_ok=True)" } ]
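The context list for this row ends here. As a minimal standalone sketch (the directory and config values are hypothetical placeholders), the round trip these snippets rely on is just `yaml.dump` to a fixed filename in `write_config_file` and `yaml.full_load` in `get_cfg_from_full_path`:

```python
# Standalone round trip mirroring write_config_file / get_cfg_from_full_path above.
# The temporary directory and config contents are hypothetical placeholders.
import os
import tempfile
import yaml

def write_config_file(config_data, dir_home, filename="LeafMachine2.yaml"):
    file_path = os.path.join(dir_home, filename)
    with open(file_path, "w") as outfile:
        yaml.dump(config_data, outfile, default_flow_style=False)

def get_cfg_from_full_path(path_cfg):
    with open(path_cfg, "r") as ymlfile:
        return yaml.full_load(ymlfile)

with tempfile.TemporaryDirectory() as dir_home:
    cfg = {"leafmachine": {"project": {"run_name": "test", "batch_size": 500}}}
    write_config_file(cfg, dir_home)
    restored = get_cfg_from_full_path(os.path.join(dir_home, "LeafMachine2.yaml"))
    print(restored == cfg)  # True
```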
import streamlit as st import yaml, os, json, random, time, re import matplotlib.pyplot as plt import plotly.graph_objs as go import numpy as np import pandas as pd from itertools import chain from PIL import Image from typing import Union from streamlit_extras.let_it_rain import rain from vouchervision.LeafMachine2_Config_Builder import write_config_file from vouchervision.VoucherVision_Config_Builder import build_VV_config, run_demo_tests_GPT, run_demo_tests_Palm , TestOptionsGPT, TestOptionsPalm, check_if_usable, run_api_tests from vouchervision.vouchervision_main import voucher_vision, voucher_vision_OCR_test from vouchervision.general_utils import test_GPU, get_cfg_from_full_path, summarize_expense_report, create_google_ocr_yaml_config, validate_dir
10,828
st.write("") if st.session_state.config['leafmachine']['project']['use_domain_knowledge']: st.session_state.config['leafmachine']['project']['embeddings_database_name'] = st.text_input("Embeddings database name (only use underscores)", st.session_state.config['leafmachine']['project'].get('embeddings_database_name', '')) st.session_state.config['leafmachine']['project']['build_new_embeddings_database'] = st.checkbox("Build *new* embeddings database", st.session_state.config['leafmachine']['project'].get('build_new_embeddings_database', False)) st.session_state.config['leafmachine']['project']['path_to_domain_knowledge_xlsx'] = st.text_input("Path to domain knowledge CSV file (will be used to create new embeddings database)", st.session_state.config['leafmachine']['project'].get('path_to_domain_knowledge_xlsx', '')) else: st.session_state.config['leafmachine']['project']['embeddings_database_name'] = st.text_input("Embeddings database name (only use underscores)", st.session_state.config['leafmachine']['project'].get('embeddings_database_name', ''), disabled=True) st.session_state.config['leafmachine']['project']['build_new_embeddings_database'] = st.checkbox("Build *new* embeddings database", st.session_state.config['leafmachine']['project'].get('build_new_embeddings_database', False), disabled=True) st.session_state.config['leafmachine']['project']['path_to_domain_knowledge_xlsx'] = st.text_input("Path to domain knowledge CSV file (will be used to create new embeddings database)", st.session_state.config['leafmachine']['project'].get('path_to_domain_knowledge_xlsx', ''), disabled=True) def render_expense_report_summary(): expense_summary = st.session_state.expense_summary expense_report = st.session_state.expense_report st.header('Expense Report Summary') if expense_summary: st.metric(label="Total Cost", value=f"${round(expense_summary['total_cost_sum'], 4):,}") col1, col2 = st.columns(2) # Run count and total costs with col1: st.metric(label="Run Count", value=expense_summary['run_count']) st.metric(label="Tokens In", value=f"{expense_summary['tokens_in_sum']:,}") # Token information with col2: st.metric(label="Total Images", value=expense_summary['n_images_sum']) st.metric(label="Tokens Out", value=f"{expense_summary['tokens_out_sum']:,}") # Calculate cost proportion per image for each API version st.subheader('Average Cost per Image by API Version') cost_labels = [] cost_values = [] total_images = 0 cost_per_image_dict = {} # Iterate through the expense report to accumulate costs and image counts for index, row in expense_report.iterrows(): api_version = row['api_version'] total_cost = row['total_cost'] n_images = row['n_images'] total_images += n_images # Keep track of total images processed if api_version not in cost_per_image_dict: cost_per_image_dict[api_version] = {'total_cost': 0, 'n_images': 0} cost_per_image_dict[api_version]['total_cost'] += total_cost cost_per_image_dict[api_version]['n_images'] += n_images api_versions = list(cost_per_image_dict.keys()) colors = [COLORS_EXPENSE_REPORT[version] if version in COLORS_EXPENSE_REPORT else '#DDDDDD' for version in api_versions] # Calculate the cost per image for each API version for version, cost_data in cost_per_image_dict.items(): total_cost = cost_data['total_cost'] n_images = cost_data['n_images'] # Calculate the cost per image for this version cost_per_image = total_cost / n_images if n_images > 0 else 0 cost_labels.append(version) cost_values.append(cost_per_image) # Generate the pie chart cost_pie_chart = 
go.Figure(data=[go.Pie(labels=cost_labels, values=cost_values, hole=.3)]) # Update traces for custom text in hoverinfo, displaying cost with a dollar sign and two decimal places cost_pie_chart.update_traces( marker=dict(colors=colors), text=[f"${value:.2f}" for value in cost_values], # Formats the cost as a string with a dollar sign and two decimals textinfo='percent+label', hoverinfo='label+percent+text' # Adds custom text (formatted cost) to the hover information ) st.plotly_chart(cost_pie_chart, use_container_width=True) st.subheader('Proportion of Total Cost by API Version') cost_labels = [] cost_proportions = [] total_cost_by_version = {} # Sum the total cost for each API version for index, row in expense_report.iterrows(): api_version = row['api_version'] total_cost = row['total_cost'] if api_version not in total_cost_by_version: total_cost_by_version[api_version] = 0 total_cost_by_version[api_version] += total_cost # Calculate the combined total cost for all versions combined_total_cost = sum(total_cost_by_version.values()) # Calculate the proportion of total cost for each API version for version, total_cost in total_cost_by_version.items(): proportion = (total_cost / combined_total_cost) * 100 if combined_total_cost > 0 else 0 cost_labels.append(version) cost_proportions.append(proportion) # Generate the pie chart cost_pie_chart = go.Figure(data=[go.Pie(labels=cost_labels, values=cost_proportions, hole=.3)]) # Update traces for custom text in hoverinfo cost_pie_chart.update_traces( marker=dict(colors=colors), text=[f"${cost:.2f}" for cost in total_cost_by_version.values()], # This will format the cost to 2 decimal places textinfo='percent+label', hoverinfo='label+percent+text' # This tells Plotly to show the label, percent, and custom text (cost) on hover ) st.plotly_chart(cost_pie_chart, use_container_width=True) # API version usage percentages pie chart st.subheader('Runs by API Version') api_versions = list(expense_summary['api_version_percentages'].keys()) percentages = [expense_summary['api_version_percentages'][version] for version in api_versions] pie_chart = go.Figure(data=[go.Pie(labels=api_versions, values=percentages, hole=.3)]) pie_chart.update_layout(margin=dict(t=0, b=0, l=0, r=0)) pie_chart.update_traces(marker=dict(colors=colors),) st.plotly_chart(pie_chart, use_container_width=True) else: st.error('No expense report data available.') def sidebar_content(): if not os.path.exists(os.path.join(st.session_state.dir_home,'expense_report')): validate_dir(os.path.join(st.session_state.dir_home,'expense_report')) expense_report_path = os.path.join(st.session_state.dir_home, 'expense_report', 'expense_report.csv') if os.path.exists(expense_report_path): # File exists, proceed with summarization
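The cropped code above ends mid-function; its ground-truth continuation is the single line recorded further down in this record. As an aside on the aggregation it performs: render_expense_report_summary builds per-version totals with an explicit loop over expense_report.iterrows(), and the same step can be expressed with a groupby. The sketch below is hypothetical (the DataFrame df and its toy values are invented; only the column names api_version, total_cost, and n_images come from the code above) and is not the project's own implementation.

import pandas as pd

# Hypothetical frame with the same columns the function above reads.
df = pd.DataFrame({
    'api_version': ['GPT_4', 'GPT_4', 'GPT_3_5'],
    'total_cost': [0.12, 0.30, 0.05],
    'n_images': [2, 5, 1],
})

# Equivalent of the explicit accumulation loop: totals per API version.
per_version = df.groupby('api_version')[['total_cost', 'n_images']].sum()

# Cost per image per version (guarding against division by zero, as the loop does).
per_version['cost_per_image'] = per_version['total_cost'] / per_version['n_images'].replace(0, pd.NA)

# Share of total cost per version, in percent.
per_version['cost_share_pct'] = 100 * per_version['total_cost'] / per_version['total_cost'].sum()

print(per_version)

The division guard mirrors the n_images > 0 check in the loop above; everything else is a straightforward restatement of the same arithmetic.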
PROMPTS_THAT_NEED_DOMAIN_KNOWLEDGE = ["Version 1","Version 1 PaLM 2"] COLORS_EXPENSE_REPORT = { 'GPT_4': '#8fff66', # Bright Green 'GPT_3_5': '#006400', # Dark Green 'PALM2': '#66a8ff' # blue } class ProgressReport: def __init__(self, overall_bar, batch_bar, text_overall, text_batch): self.overall_bar = overall_bar self.batch_bar = batch_bar self.text_overall = text_overall self.text_batch = text_batch self.current_overall_step = 0 self.total_overall_steps = 20 # number of major steps in machine function self.current_batch = 0 self.total_batches = 20 def update_overall(self, step_name=""): self.current_overall_step += 1 self.overall_bar.progress(self.current_overall_step / self.total_overall_steps) self.text_overall.text(step_name) def update_batch(self, step_name=""): self.current_batch += 1 self.batch_bar.progress(self.current_batch / self.total_batches) self.text_batch.text(step_name) def set_n_batches(self, n_batches): self.total_batches = n_batches def set_n_overall(self, total_overall_steps): self.current_overall_step = 0 self.overall_bar.progress(0) self.total_overall_steps = total_overall_steps def reset_batch(self, step_name): self.current_batch = 0 self.batch_bar.progress(0) self.text_batch.text(step_name) def reset_overall(self, step_name): self.current_overall_step = 0 self.overall_bar.progress(0) self.text_overall.text(step_name) def get_n_images(self): return self.n_images def get_n_overall(self): return self.total_overall_steps def does_private_file_exist(): dir_home = os.path.dirname(os.path.dirname(__file__)) path_cfg_private = os.path.join(dir_home, 'PRIVATE_DATA.yaml') return os.path.exists(path_cfg_private) def setup_streamlit_config(dir_home): # Define the directory path and filename dir_path = os.path.join(dir_home, ".streamlit") file_path = os.path.join(dir_path, "config.toml") # Check if directory exists, if not create it if not os.path.exists(dir_path): os.makedirs(dir_path) # Create or modify the file with the provided content config_content = f""" [theme] base = "dark" primaryColor = "#00ff00" [server] enableStaticServing = false runOnSave = true port = 8524 """ with open(file_path, "w") as f: f.write(config_content.strip()) def display_scrollable_results(JSON_results, test_results, OPT2, OPT3): """ Display the results from JSON_results in a scrollable container. 
""" # Initialize the container con_results = st.empty() with con_results.container(): # Start the custom container for all the results results_html = """<div class='scrollable-results-container'>""" for idx, (test_name, _) in enumerate(sorted(test_results.items())): _, ind_opt1, ind_opt2, ind_opt3 = test_name.split('__') opt2_readable = "Use LeafMachine2" if OPT2[int(ind_opt2.split('-')[1])] else "Don't use LeafMachine2" opt3_readable = f"{OPT3[int(ind_opt3.split('-')[1])]}" if JSON_results[idx] is None: results_html += f"<p>None</p>" else: formatted_json = json.dumps(JSON_results[idx], indent=4, sort_keys=False) results_html += f"<pre>[{opt2_readable}] + [{opt3_readable}]<br/>{formatted_json}</pre>" # End the custom container results_html += """</div>""" # The CSS to make this container scrollable css = """ <style> .scrollable-results-container { overflow-y: auto; height: 600px; width: 100%; white-space: pre-wrap; # To wrap the content font-family: monospace; # To give the JSON a code-like appearance } </style> """ # Apply the CSS and then the results st.markdown(css, unsafe_allow_html=True) st.markdown(results_html, unsafe_allow_html=True) def refresh(): st.write('') def display_test_results(test_results, JSON_results, llm_version): if llm_version == 'gpt': OPT1, OPT2, OPT3 = TestOptionsGPT.get_options() elif llm_version == 'palm': OPT1, OPT2, OPT3 = TestOptionsPalm.get_options() else: raise widths = [1] * (len(OPT1) + 2) + [2] columns = st.columns(widths) with columns[0]: st.write("LeafMachine2") with columns[1]: st.write("Prompt") with columns[len(OPT1) + 2]: st.write("Scroll to See Last Transcription in Each Test") already_written = set() for test_name, result in sorted(test_results.items()): _, ind_opt1, _, _ = test_name.split('__') option_value = OPT1[int(ind_opt1.split('-')[1])] if option_value not in already_written: with columns[int(ind_opt1.split('-')[1]) + 2]: st.write(option_value) already_written.add(option_value) printed_options = set() with columns[-1]: display_scrollable_results(JSON_results, test_results, OPT2, OPT3) # Close the custom container st.write('</div>', unsafe_allow_html=True) for idx, (test_name, result) in enumerate(sorted(test_results.items())): _, ind_opt1, ind_opt2, ind_opt3 = test_name.split('__') opt2_readable = "Use LeafMachine2" if OPT2[int(ind_opt2.split('-')[1])] else "Don't use LeafMachine2" opt3_readable = f"{OPT3[int(ind_opt3.split('-')[1])]}" if (opt2_readable, opt3_readable) not in printed_options: with columns[0]: st.info(f"{opt2_readable}") st.write('---') with columns[1]: st.info(f"{opt3_readable}") st.write('---') printed_options.add((opt2_readable, opt3_readable)) with columns[int(ind_opt1.split('-')[1]) + 2]: if result: st.success(f"Test Passed") else: st.error(f"Test Failed") st.write('---') # success_count = sum(1 for result in test_results.values() if result) # failure_count = len(test_results) - success_count # proportional_rain("🥇", success_count, "💔", failure_count, font_size=72, falling_speed=5, animation_length="infinite") rain_emojis(test_results) def add_emoji_delay(): time.sleep(0.3) def rain_emojis(test_results): # test_results = { # 'test1': True, # Test passed # 'test2': True, # Test passed # 'test3': True, # Test passed # 'test4': False, # Test failed # 'test5': False, # Test failed # 'test6': False, # Test failed # 'test7': False, # Test failed # 'test8': False, # Test failed # 'test9': False, # Test failed # 'test10': False, # Test failed # } success_emojis = ["🥇", "🏆", "🍾", "🙌"] failure_emojis = ["💔", "😭"] success_count = 
sum(1 for result in test_results.values() if result) failure_count = len(test_results) - success_count chosen_emoji = random.choice(success_emojis) for _ in range(success_count): rain( emoji=chosen_emoji, font_size=72, falling_speed=4, animation_length=2, ) add_emoji_delay() chosen_emoji = random.choice(failure_emojis) for _ in range(failure_count): rain( emoji=chosen_emoji, font_size=72, falling_speed=5, animation_length=1, ) add_emoji_delay() def get_prompt_versions(LLM_version): yaml_files = [f for f in os.listdir(os.path.join(st.session_state.dir_home, 'custom_prompts')) if f.endswith('.yaml')] if LLM_version in ["gpt-4-1106-preview", "GPT 4", "GPT 3.5", "Azure GPT 4", "Azure GPT 3.5"]: versions = ["Version 1", "Version 1 No Domain Knowledge", "Version 2"] return (versions + yaml_files, "Version 2") elif LLM_version in ["PaLM 2",]: versions = ["Version 1 PaLM 2", "Version 1 PaLM 2 No Domain Knowledge", "Version 2 PaLM 2"] return (versions + yaml_files, "Version 2 PaLM 2") else: # Handle other cases or raise an error return (yaml_files, None) def get_private_file(): dir_home = os.path.dirname(os.path.dirname(__file__)) path_cfg_private = os.path.join(dir_home, 'PRIVATE_DATA.yaml') return get_cfg_from_full_path(path_cfg_private) def create_space_saver(): st.subheader("Space Saving Options") col_ss_1, col_ss_2 = st.columns([2,2]) with col_ss_1: st.write("Several folders are created and populated with data during the VoucherVision transcription process.") st.write("Below are several options that will allow you to automatically delete temporary files that you may not need for everyday operations.") st.write("VoucherVision creates the following folders. Folders marked with a :star: are required if you want to use VoucherVisionEditor for quality control.") st.write("`../[Run Name]/Archival_Components`") st.write("`../[Run Name]/Config_File`") st.write("`../[Run Name]/Cropped_Images` :star:") st.write("`../[Run Name]/Logs`") st.write("`../[Run Name]/Original_Images` :star:") st.write("`../[Run Name]/Transcription` :star:") with col_ss_2: st.session_state.config['leafmachine']['project']['delete_temps_keep_VVE'] = st.checkbox("Delete Temporary Files (KEEP files required for VoucherVisionEditor)", st.session_state.config['leafmachine']['project'].get('delete_temps_keep_VVE', False)) st.session_state.config['leafmachine']['project']['delete_all_temps'] = st.checkbox("Keep only the final transcription file", st.session_state.config['leafmachine']['project'].get('delete_all_temps', False),help="*WARNING:* This limits your ability to do quality assurance. 
This will delete all folders created by VoucherVision, leaving only the `transcription.xlsx` file.") # def create_private_file(): # st.session_state.proceed_to_main = False # if st.session_state.private_file: # cfg_private = get_private_file() # create_private_file_0(cfg_private) # else: # st.title("VoucherVision") # create_private_file_0() def create_private_file(): st.session_state.proceed_to_main = False st.title("VoucherVision") col_private,_= st.columns([12,2]) if st.session_state.private_file: cfg_private = get_private_file() else: cfg_private = {} cfg_private['openai'] = {} cfg_private['openai']['OPENAI_API_KEY'] ='' cfg_private['openai_azure'] = {} cfg_private['openai_azure']['openai_api_key'] = '' cfg_private['openai_azure']['api_version'] = '' cfg_private['openai_azure']['openai_api_base'] ='' cfg_private['openai_azure']['openai_organization'] ='' cfg_private['openai_azure']['openai_api_type'] ='' cfg_private['google_cloud'] = {} cfg_private['google_cloud']['path_json_file'] ='' cfg_private['google_palm'] = {} cfg_private['google_palm']['google_palm_api'] ='' with col_private: st.header("Set API keys") st.info("***Note:*** There is a known bug with tabs in Streamlit. If you update an input field it may take you back to the 'Project Settings' tab. Changes that you made are saved, it's just an annoying glitch. We are aware of this issue and will fix it as soon as we can.") st.warning("To commit changes to API keys you must press the 'Set API Keys' button at the bottom of the page.") st.write("Before using VoucherVision you must set your API keys. All keys are stored locally on your computer and are never made public.") st.write("API keys are stored in `../VoucherVision/PRIVATE_DATA.yaml`.") st.write("Deleting this file will allow you to reset API keys. Alternatively, you can edit the keys in the user interface.") st.write("Leave keys blank if you do not intend to use that service.") st.write("---") st.subheader("Google Vision (*Required*)") st.markdown("VoucherVision currently uses [Google Vision API](https://cloud.google.com/vision/docs/ocr) for OCR. Generating an API key for this is more involved than the others. [Please carefully follow the instructions outlined here to create and setup your account.](https://cloud.google.com/vision/docs/setup) ") st.markdown(""" Once your account is created, [visit this page](https://console.cloud.google.com) and create a project. Then follow these instructions: - **Select your Project**: If you have multiple projects, ensure you select the one where you've enabled the Vision API. - **Open the Navigation Menu**: Click on the hamburger menu (three horizontal lines) in the top left corner. - **Go to IAM & Admin**: In the navigation pane, hover over "IAM & Admin" and then click on "Service accounts." - **Locate Your Service Account**: Find the service account for which you wish to download the JSON key. If you haven't created a service account yet, you'll need to do so by clicking the "CREATE SERVICE ACCOUNT" button at the top. - **Download the JSON Key**: - Click on the three dots (actions menu) on the right side of your service account name. - Select "Manage keys." - In the pop-up window, click on the "ADD KEY" button and select "JSON." - The JSON key file will automatically be downloaded to your computer. - **Store Safely**: This file contains sensitive data that can be used to authenticate and bill your Google Cloud account. Never commit it to public repositories or expose it in any way. Always keep it safe and secure. 
""") with st.container(): c_in_ocr, c_button_ocr = st.columns([10,2]) with c_in_ocr: google_vision = st.text_input(label = 'Full path to Google Cloud JSON API key file', value = cfg_private['google_cloud'].get('path_json_file', ''), placeholder = 'e.g. C:/Documents/Secret_Files/google_API/application_default_credentials.json', help ="This API Key is in the form of a JSON file. Please save the JSON file in a safe directory. DO NOT store the JSON key inside of the VoucherVision directory.", type='password',key='924857298734590283750932809238') with c_button_ocr: st.empty() st.write("---") st.subheader("OpenAI") st.markdown("API key for first-party OpenAI API. Create an account with OpenAI [here](https://platform.openai.com/signup), then create an API key [here](https://platform.openai.com/account/api-keys).") with st.container(): c_in_openai, c_button_openai = st.columns([10,2]) with c_in_openai: openai_api_key = st.text_input("openai_api_key", cfg_private['openai'].get('OPENAI_API_KEY', ''), help='The actual API key. Likely to be a string of 2 character, a dash, and then a 48-character string: sk-XXXXXXXX...', placeholder = 'e.g. sk-XXXXXXXX...', type='password') with c_button_openai: st.empty() st.write("---") st.subheader("OpenAI - Azure") st.markdown("This version OpenAI relies on Azure servers directly as is intended for private enterprise instances of OpenAI's services, such as [UM-GPT](https://its.umich.edu/computing/ai). Administrators will provide you with the following information.") azure_openai_api_version = st.text_input("azure_openai_api_version", cfg_private['openai_azure'].get('api_version', ''), help='API Version e.g. "2023-05-15"', placeholder = 'e.g. 2023-05-15', type='password') azure_openai_api_key = st.text_input("azure_openai_api_key", cfg_private['openai_azure'].get('openai_api_key', ''), help='The actual API key. Likely to be a 32-character string', placeholder = 'e.g. 12333333333333333333333333333332', type='password') azure_openai_api_base = st.text_input("azure_openai_api_base", cfg_private['openai_azure'].get('openai_api_base', ''), help='The base url for the API e.g. "https://api.umgpt.umich.edu/azure-openai-api"', placeholder = 'e.g. https://api.umgpt.umich.edu/azure-openai-api', type='password') azure_openai_organization = st.text_input("azure_openai_organization", cfg_private['openai_azure'].get('openai_organization', ''), help='Your organization code. Likely a short string', placeholder = 'e.g. 123456', type='password') azure_openai_api_type = st.text_input("azure_openai_api_type", cfg_private['openai_azure'].get('openai_api_type', ''), help='The API type. Typically "azure"', placeholder = 'e.g. azure', type='password') with st.container(): c_in_azure, c_button_azure = st.columns([10,2]) with c_button_azure: st.empty() st.write("---") st.subheader("Google PaLM 2") st.markdown('Follow these [instructions](https://developers.generativeai.google/tutorials/setup) to generate an API key for PaLM 2. You may need to also activate an account with [MakerSuite](https://makersuite.google.com/app/apikey) and enable "early access."') with st.container(): c_in_palm, c_button_palm = st.columns([10,2]) with c_in_palm: google_palm = st.text_input("Google PaLM 2 API Key", cfg_private['google_palm'].get('google_palm_api', ''), help='The MakerSuite API key e.g. a 32-character string', placeholder='e.g. 
SATgthsykuE64FgrrrrEervr3S4455t_geyDeGq', type='password') with st.container(): with c_button_ocr: st.write("##") st.button("Test OCR", on_click=test_API, args=['google_vision',c_in_ocr, cfg_private,openai_api_key,azure_openai_api_version,azure_openai_api_key, azure_openai_api_base,azure_openai_organization,azure_openai_api_type,google_vision,google_palm]) with st.container(): with c_button_openai: st.write("##") st.button("Test OpenAI", on_click=test_API, args=['openai',c_in_openai, cfg_private,openai_api_key,azure_openai_api_version,azure_openai_api_key, azure_openai_api_base,azure_openai_organization,azure_openai_api_type,google_vision,google_palm]) with st.container(): with c_button_azure: st.write("##") st.button("Test Azure OpenAI", on_click=test_API, args=['azure_openai',c_in_azure, cfg_private,openai_api_key,azure_openai_api_version,azure_openai_api_key, azure_openai_api_base,azure_openai_organization,azure_openai_api_type,google_vision,google_palm]) with st.container(): with c_button_palm: st.write("##") st.button("Test PaLM 2", on_click=test_API, args=['palm',c_in_palm, cfg_private,openai_api_key,azure_openai_api_version,azure_openai_api_key, azure_openai_api_base,azure_openai_organization,azure_openai_api_type,google_vision,google_palm]) st.button("Set API Keys",type='primary', on_click=save_changes_to_API_keys, args=[cfg_private,openai_api_key,azure_openai_api_version,azure_openai_api_key, azure_openai_api_base,azure_openai_organization,azure_openai_api_type,google_vision,google_palm]) if st.button('Proceed to VoucherVision'): st.session_state.proceed_to_private = False st.session_state.proceed_to_main = True def test_API(api, message_loc, cfg_private,openai_api_key,azure_openai_api_version,azure_openai_api_key, azure_openai_api_base,azure_openai_organization,azure_openai_api_type,google_vision,google_palm): # Save the API keys save_changes_to_API_keys(cfg_private,openai_api_key,azure_openai_api_version,azure_openai_api_key,azure_openai_api_base,azure_openai_organization,azure_openai_api_type,google_vision,google_palm) with st.spinner('Performing validation checks...'): if api == 'google_vision': print("*** Google Vision OCR API Key ***") try: demo_config_path = os.path.join(st.session_state.dir_home,'demo','validation_configs','google_vision_ocr_test.yaml') demo_images_path = os.path.join(st.session_state.dir_home, 'demo', 'demo_images') demo_out_path = os.path.join(st.session_state.dir_home, 'demo', 'demo_output','run_name') create_google_ocr_yaml_config(demo_config_path, demo_images_path, demo_out_path) voucher_vision_OCR_test(demo_config_path, st.session_state.dir_home, None, demo_images_path) with message_loc: st.success("Google Vision OCR API Key Valid :white_check_mark:") return True except Exception as e: with message_loc: st.error(f"Google Vision OCR API Key Failed! 
{e}") return False elif api == 'openai': print("*** OpenAI API Key ***") try: if run_api_tests('openai'): with message_loc: st.success("OpenAI API Key Valid :white_check_mark:") else: with message_loc: st.error("OpenAI API Key Failed:exclamation:") return False except Exception as e: with message_loc: st.error(f"OpenAI API Key Failed:exclamation: {e}") elif api == 'azure_openai': print("*** Azure OpenAI API Key ***") try: if run_api_tests('azure_openai'): with message_loc: st.success("Azure OpenAI API Key Valid :white_check_mark:") else: with message_loc: st.error(f"Azure OpenAI API Key Failed:exclamation:") return False except Exception as e: with message_loc: st.error(f"Azure OpenAI API Key Failed:exclamation: {e}") elif api == 'palm': print("*** Google PaLM 2 API Key ***") try: if run_api_tests('palm'): with message_loc: st.success("Google PaLM 2 API Key Valid :white_check_mark:") else: with message_loc: st.error("Google PaLM 2 API Key Failed:exclamation:") return False except Exception as e: with message_loc: st.error(f"Google PaLM 2 API Key Failed:exclamation: {e}") def save_changes_to_API_keys(cfg_private,openai_api_key,azure_openai_api_version,azure_openai_api_key, azure_openai_api_base,azure_openai_organization,azure_openai_api_type,google_vision,google_palm): # Update the configuration dictionary with the new values cfg_private['openai']['OPENAI_API_KEY'] = openai_api_key cfg_private['openai_azure']['api_version'] = azure_openai_api_version cfg_private['openai_azure']['openai_api_key'] = azure_openai_api_key cfg_private['openai_azure']['openai_api_base'] = azure_openai_api_base cfg_private['openai_azure']['openai_organization'] = azure_openai_organization cfg_private['openai_azure']['openai_api_type'] = azure_openai_api_type cfg_private['google_cloud']['path_json_file'] = google_vision cfg_private['google_palm']['google_palm_api'] = google_palm # Call the function to write the updated configuration to the YAML file write_config_file(cfg_private, st.session_state.dir_home, filename="PRIVATE_DATA.yaml") st.session_state.private_file = does_private_file_exist() # Function to load a YAML file and update session_state def load_prompt_yaml(filename): with open(filename, 'r') as file: st.session_state['prompt_info'] = yaml.safe_load(file) st.session_state['prompt_author'] = st.session_state['prompt_info'].get('prompt_author', st.session_state['default_prompt_author']) st.session_state['prompt_author_institution'] = st.session_state['prompt_info'].get('prompt_author_institution', st.session_state['default_prompt_author_institution']) st.session_state['prompt_description'] = st.session_state['prompt_info'].get('prompt_description', st.session_state['default_prompt_description']) st.session_state['instructions'] = st.session_state['prompt_info'].get('instructions', st.session_state['default_instructions']) st.session_state['json_formatting_instructions'] = st.session_state['prompt_info'].get('json_formatting_instructions', st.session_state['default_json_formatting_instructions'] ) st.session_state['rules'] = st.session_state['prompt_info'].get('rules', {}) st.session_state['mapping'] = st.session_state['prompt_info'].get('mapping', {}) st.session_state['LLM'] = st.session_state['prompt_info'].get('LLM', 'gpt') # Placeholder: st.session_state['assigned_columns'] = list(chain.from_iterable(st.session_state['mapping'].values())) def save_prompt_yaml(filename): yaml_content = { 'prompt_author': st.session_state['prompt_author'], 'prompt_author_institution': 
st.session_state['prompt_author_institution'], 'prompt_description': st.session_state['prompt_description'], 'LLM': st.session_state['LLM'], 'instructions': st.session_state['instructions'], 'json_formatting_instructions': st.session_state['json_formatting_instructions'], 'rules': st.session_state['rules'], 'mapping': st.session_state['mapping'], } dir_prompt = os.path.join(st.session_state.dir_home, 'custom_prompts') filepath = os.path.join(dir_prompt, f"{filename}.yaml") with open(filepath, 'w') as file: yaml.safe_dump(dict(yaml_content), file, sort_keys=False) st.success(f"Prompt saved as '{filename}.yaml'.") def check_unique_mapping_assignments(): if len(st.session_state['assigned_columns']) != len(set(st.session_state['assigned_columns'])): st.error("Each column name must be assigned to only one category.") return False else: st.success("Mapping confirmed.") return True def check_prompt_yaml_filename(fname): # Check if the filename only contains letters, numbers, underscores, and dashes pattern = r'^[\w-]+$' # The \w matches any alphanumeric character and is equivalent to the character class [a-zA-Z0-9_]. # The hyphen - is literally matched. if re.match(pattern, fname): return True else: return False def btn_load_prompt(selected_yaml_file, dir_prompt): if selected_yaml_file: yaml_file_path = os.path.join(dir_prompt, selected_yaml_file) load_prompt_yaml(yaml_file_path) elif not selected_yaml_file: # Directly assigning default values since no file is selected st.session_state['prompt_info'] = {} st.session_state['prompt_author'] = st.session_state['default_prompt_author'] st.session_state['prompt_author_institution'] = st.session_state['default_prompt_author_institution'] st.session_state['prompt_description'] = st.session_state['default_prompt_description'] st.session_state['instructions'] = st.session_state['default_instructions'] st.session_state['json_formatting_instructions'] = st.session_state['default_json_formatting_instructions'] st.session_state['rules'] = {} st.session_state['LLM'] = 'gpt' st.session_state['assigned_columns'] = [] st.session_state['prompt_info'] = { 'prompt_author': st.session_state['prompt_author'], 'prompt_author_institution': st.session_state['prompt_author_institution'], 'prompt_description': st.session_state['prompt_description'], 'instructions': st.session_state['instructions'], 'json_formatting_instructions': st.session_state['json_formatting_instructions'], 'rules': st.session_state['rules'], 'mapping': st.session_state['mapping'], 'LLM': st.session_state['LLM'] } def build_LLM_prompt_config(): st.session_state['assigned_columns'] = [] st.session_state['default_prompt_author'] = 'unknown' st.session_state['default_prompt_author_institution'] = 'unknown' st.session_state['default_prompt_description'] = 'unknown' st.session_state['default_instructions'] = """1. Refactor the unstructured OCR text into a dictionary based on the JSON structure outlined below. 2. You should map the unstructured OCR text to the appropriate JSON key and then populate the field based on its rules. 3. Some JSON key fields are permitted to remain empty if the corresponding information is not found in the unstructured OCR text. 4. Ignore any information in the OCR text that doesn't fit into the defined JSON structure. 5. Duplicate dictionary fields are not allowed. 6. Ensure that all JSON keys are in lowercase. 7. Ensure that new JSON field values follow sentence case capitalization. 8. 
Ensure all key-value pairs in the JSON dictionary strictly adhere to the format and data types specified in the template. 9. Ensure the output JSON string is valid JSON format. It should not have trailing commas or unquoted keys. 10. Only return a JSON dictionary represented as a string. You should not explain your answer.""" st.session_state['default_json_formatting_instructions'] = """The next section of instructions outlines how to format the JSON dictionary. The keys are the same as those of the final formatted JSON object. For each key there is a format requirement that specifies how to transcribe the information for that key. The possible formatting options are: 1. "verbatim transcription" - field is populated with verbatim text from the unformatted OCR. 2. "spell check transcription" - field is populated with spelling corrected text from the unformatted OCR. 3. "boolean yes no" - field is populated with only yes or no. 4. "boolean 1 0" - field is populated with only 1 or 0. 5. "integer" - field is populated with only an integer. 6. "[list]" - field is populated from one of the values in the list. 7. "yyyy-mm-dd" - field is populated with a date in the format year-month-day. The desired null value is also given. Populate the field with the null value of the information for that key is not present in the unformatted OCR text.""" # Start building the Streamlit app col_prompt_main_left, ___, col_prompt_main_right = st.columns([6,1,3]) with col_prompt_main_left: st.title("Custom LLM Prompt Builder") st.subheader('About') st.write("This form allows you to craft a prompt for your specific task.") st.subheader('How it works') st.write("1. Edit this page until you are happy with your instructions. We recommend looking at the basic structure, writing down your prompt inforamtion in a Word document so that it does not randomly disappear, and then copying and pasting that info into this form once your whole prompt structure is defined.") st.write("2. After you enter all of your prompt instructions, click 'Save' and give your file a name.") st.write("3. This file will be saved as a yaml configuration file in the `..VoucherVision/custom_prompts` folder.") st.write("4. When you go back the main VoucherVision page you will now see your custom prompt available in the 'Prompt Version' dropdown menu.") st.write("5. Select your custom prompt. Note, your prompt will only be available for the LLM that you set when filling out the form below.") dir_prompt = os.path.join(st.session_state.dir_home, 'custom_prompts') yaml_files = [f for f in os.listdir(dir_prompt) if f.endswith('.yaml')] col_load_text, col_load_btn = st.columns([8,2]) with col_load_text: # Dropdown for selecting a YAML file selected_yaml_file = st.selectbox('Select a prompt YAML file to load:', [''] + yaml_files) with col_load_btn: st.write('##') # Button to load the selected prompt st.button('Load Prompt', on_click=btn_load_prompt, args=[selected_yaml_file, dir_prompt]) # Prompt Author Information st.header("Prompt Author Information") st.write("We value community contributions! Please provide your name(s) (or pseudonym if you prefer) for credit. If you leave this field blank, it will say 'unknown'.") st.session_state['prompt_author'] = st.text_input("Enter names of prompt author(s)", value=st.session_state['default_prompt_author']) st.write("Please provide your institution name. 
If you leave this field blank, it will say 'unknown'.") st.session_state['prompt_author_institution'] = st.text_input("Enter name of institution", value=st.session_state['default_prompt_author_institution']) st.write("Please provide a description of your prompt and its intended task. Is it designed for a specific collection? Taxa? Database structure?") st.session_state['prompt_description'] = st.text_input("Enter description of prompt", value=st.session_state['default_prompt_description']) st.write('---') st.header("Set LLM Model Type") # Define the options for the dropdown llm_options = ['gpt', 'palm'] # Create the dropdown and set the value to session_state['LLM'] st.write("Which LLM is this prompt designed for? This will not restrict its use to a specific LLM, but some prompts will behave in different ways across models.") st.write("For example, VoucherVision will automatically add multiple JSON formatting blocks to all PaLM 2 prompts to coax PaLM 2 to return a valid JSON object.") st.session_state['LLM'] = st.selectbox('Set LLM', llm_options, index=llm_options.index(st.session_state.get('LLM', 'gpt'))) st.write('---') # Instructions Section st.header("Instructions") st.write("These are the general instructions that guide the LLM through the transcription task. We recommend using the default instructions unless you have a specific reason to change them.") st.session_state['instructions'] = st.text_area("Enter instructions", value=st.session_state['default_instructions'].strip(), height=350, disabled=True) st.write('---') # Column Instructions Section st.header("JSON Formatting Instructions") st.write("The following section tells the LLM how we want to structure the JSON dictionary. We do not recommend changing this section because it would likely result in unstable and inconsistent behavior.") st.session_state['json_formatting_instructions'] = st.text_area("Enter column instructions", value=st.session_state['default_json_formatting_instructions'], height=350, disabled=True) st.write('---') col_left, col_right = st.columns([6,4]) with col_left: st.subheader('Add/Edit Columns') # Initialize rules in session state if not already present if 'rules' not in st.session_state or not st.session_state['rules']: st.session_state['rules']['Dictionary'] = { "catalog_number": { "format": "verbatim transcription", "null_value": "", "description": "The barcode identifier, typically a number with at least 6 digits, but fewer than 30 digits." } } st.session_state['rules']['SpeciesName'] = { "taxonomy": ["Genus_species"] } # Layout for adding a new column name # col_text, col_textbtn = st.columns([8, 2]) # with col_text: new_column_name = st.text_input("Enter a new column name:") # with col_textbtn: # st.write('##') if st.button("Add New Column") and new_column_name: if new_column_name not in st.session_state['rules']['Dictionary']: st.session_state['rules']['Dictionary'][new_column_name] = {"format": "", "null_value": "", "description": ""} st.success(f"New column '{new_column_name}' added. Now you can edit its properties.") else: st.error("Column name already exists. 
Please enter a unique column name.") # Get columns excluding the protected "catalog_number" st.write('#') editable_columns = [col for col in st.session_state['rules']['Dictionary'] if col != "catalog_number"] column_name = st.selectbox("Select a column to edit:", [""] + editable_columns) # Handle rules editing current_rule = st.session_state['rules']['Dictionary'].get(column_name, { "format": "", "null_value": "", "description": "" }) if 'selected_column' not in st.session_state: st.session_state['selected_column'] = column_name # Form for input fields with st.form(key='rule_form'): format_options = ["verbatim transcription", "spell check transcription", "boolean yes no", "boolean 1 0", "integer", "[list]", "yyyy-mm-dd"] current_rule["format"] = st.selectbox("Format:", format_options, index=format_options.index(current_rule["format"]) if current_rule["format"] else 0) current_rule["null_value"] = st.text_input("Null value:", value=current_rule["null_value"]) current_rule["description"] = st.text_area("Description:", value=current_rule["description"]) commit_button = st.form_submit_button("Commit Column") default_rule = { "format": format_options[0], # default format "null_value": "", # default null value "description": "", # default description } if st.session_state['selected_column'] != column_name: # Column has changed. Update the session_state selected column. st.session_state['selected_column'] = column_name # Reset the current rule to the default for this new column, or a blank rule if not set. current_rule = st.session_state['rules']['Dictionary'].get(column_name, default_rule.copy()) # Handle commit action if commit_button and column_name: # Commit the rules to the session state. st.session_state['rules']['Dictionary'][column_name] = current_rule.copy() st.success(f"Column '{column_name}' added/updated in rules.") # Force the form to reset by clearing the fields from the session state st.session_state.pop('selected_column', None) # Clear the selected column to force reset # st.session_state['rules'][column_name] = current_rule # st.success(f"Column '{column_name}' added/updated in rules.") # # Reset current_rule to default values for the next input # current_rule["format"] = default_rule["format"] # current_rule["null_value"] = default_rule["null_value"] # current_rule["description"] = default_rule["description"] # # To ensure that the form fields are reset, we can clear them from the session state # for key in current_rule.keys(): # st.session_state[key] = default_rule[key] # Layout for removing an existing column # del_col, del_colbtn = st.columns([8, 2]) # with del_col: delete_column_name = st.selectbox("Select a column to delete:", [""] + editable_columns, key='delete_column') # with del_colbtn: # st.write('##') if st.button("Delete Column") and delete_column_name: del st.session_state['rules'][delete_column_name] st.success(f"Column '{delete_column_name}' removed from rules.") with col_right: # Display the current state of the JSON rules st.subheader('Formatted Columns') st.json(st.session_state['rules']['Dictionary']) # st.subheader('All Prompt Info') # st.json(st.session_state['prompt_info']) st.write('---') col_left_mapping, col_right_mapping = st.columns([6,4]) with col_left_mapping: st.header("Mapping") st.write("Assign each column name to a single category.") st.session_state['refresh_mapping'] = False # Dynamically create a list of all column names that can be assigned # This assumes that the column names are the keys in the dictionary under 'rules' all_column_names = 
list(st.session_state['rules']['Dictionary'].keys()) categories = ['TAXONOMY', 'GEOGRAPHY', 'LOCALITY', 'COLLECTING', 'MISCELLANEOUS'] if ('mapping' not in st.session_state) or (st.session_state['mapping'] == {}): st.session_state['mapping'] = {category: [] for category in categories} for category in categories: # Filter out the already assigned columns available_columns = [col for col in all_column_names if col not in st.session_state['assigned_columns'] or col in st.session_state['mapping'].get(category, [])] # Ensure the current mapping is a subset of the available options current_mapping = [col for col in st.session_state['mapping'].get(category, []) if col in available_columns] # Provide a safe default if the current mapping is empty or contains invalid options safe_default = current_mapping if all(col in available_columns for col in current_mapping) else [] # Create a multi-select widget for the category with a safe default selected_columns = st.multiselect( f"Select columns for {category}:", available_columns, default=safe_default, key=f"mapping_{category}" ) # Update the assigned_columns based on the selections for col in current_mapping: if col not in selected_columns and col in st.session_state['assigned_columns']: st.session_state['assigned_columns'].remove(col) st.session_state['refresh_mapping'] = True for col in selected_columns: if col not in st.session_state['assigned_columns']: st.session_state['assigned_columns'].append(col) st.session_state['refresh_mapping'] = True # Update the mapping in session state when there's a change st.session_state['mapping'][category] = selected_columns if st.session_state['refresh_mapping']: st.session_state['refresh_mapping'] = False # Button to confirm and save the mapping configuration if st.button('Confirm Mapping'): if check_unique_mapping_assignments(): # Proceed with further actions since the mapping is confirmed and unique pass with col_right_mapping: # Display the current state of the JSON rules st.subheader('Formatted Column Maps') st.json(st.session_state['mapping']) col_left_save, col_right_save = st.columns([6,4]) with col_left_save: # Input for new file name new_filename = st.text_input("Enter filename to save your prompt as a configuration YAML:",placeholder='my_prompt_name') # Button to save the new YAML file if st.button('Save YAML', type='primary'): if new_filename: if check_unique_mapping_assignments(): if check_prompt_yaml_filename(new_filename): save_prompt_yaml(new_filename) else: st.error("File name can only contain letters, numbers, underscores, and dashes. Cannot contain spaces.") else: st.error("Mapping contains an error. 
Make sure that each column is assigned to only ***one*** category.") else: st.error("Please enter a filename.") if st.button('Exit'): st.session_state.proceed_to_build_llm_prompt = False st.session_state.proceed_to_main = True st.rerun() with col_prompt_main_right: st.subheader('All Prompt Components') st.session_state['prompt_info'] = { 'prompt_author': st.session_state['prompt_author'], 'prompt_author_institution': st.session_state['prompt_author_institution'], 'prompt_description': st.session_state['prompt_description'], 'LLM': st.session_state['LLM'], 'instructions': st.session_state['instructions'], 'json_formatting_instructions': st.session_state['json_formatting_instructions'], 'rules': st.session_state['rules'], 'mapping': st.session_state['mapping'], } st.json(st.session_state['prompt_info']) def show_header_welcome(): st.session_state.logo_path = os.path.join(st.session_state.dir_home, 'img','logo.png') st.session_state.logo = Image.open(st.session_state.logo_path) st.image(st.session_state.logo, width=250) def determine_n_images(): try: # Check if 'dir_uploaded_images' key exists and it is not empty if 'dir_uploaded_images' in st and st['dir_uploaded_images']: dir_path = st['dir_uploaded_images'] # This would be the path to the directory return len([f for f in os.listdir(dir_path) if os.path.isfile(os.path.join(dir_path, f))]) else: return None except: return None def content_header(): col_run_1, col_run_2, col_run_3 = st.columns([4,4,2]) col_test = st.container() st.write("") st.write("") st.write("") st.write("") st.subheader("Overall Progress") col_run_info_1 = st.columns([1])[0] st.write("") st.write("") st.write("") st.write("") st.header("Configuration Settings") with col_run_info_1: # Progress # Progress # st.subheader('Project') # bar = st.progress(0) # new_text = st.empty() # Placeholder for current step name # progress_report = ProgressReportVV(bar, new_text, n_images=10) # Progress overall_progress_bar = st.progress(0) text_overall = st.empty() # Placeholder for current step name st.subheader('Transcription Progress') batch_progress_bar = st.progress(0) text_batch = st.empty() # Placeholder for current step name progress_report = ProgressReport(overall_progress_bar, batch_progress_bar, text_overall, text_batch) st.info("***Note:*** There is a known bug with tabs in Streamlit. If you update an input field it may take you back to the 'Project Settings' tab. Changes that you made are saved, it's just an annoying glitch. We are aware of this issue and will fix it as soon as we can.") st.write("If you use VoucherVision frequently, you can change the default values that are auto-populated in the form below. In a text editor or IDE, edit the first few rows in the file `../VoucherVision/vouchervision/VoucherVision_Config_Builder.py`") with col_run_1: show_header_welcome() st.subheader('Run VoucherVision') N_STEPS = 6 if determine_n_images(): st.session_state['processing_add_on'] = f" {determine_n_images()} Images" else: st.session_state['processing_add_on'] = '' if check_if_usable(): if st.button(f"Start Processing{st.session_state['processing_add_on']}", type='primary'): # Define number of overall steps progress_report.set_n_overall(N_STEPS) progress_report.update_overall(f"Starting VoucherVision...") # First, write the config file. 
write_config_file(st.session_state.config, st.session_state.dir_home, filename="VoucherVision.yaml") path_custom_prompts = os.path.join(st.session_state.dir_home,'custom_prompts',st.session_state.config['leafmachine']['project']['prompt_version']) # Call the machine function. last_JSON_response, total_cost = voucher_vision(None, st.session_state.dir_home, path_custom_prompts, None, progress_report,path_api_cost=os.path.join(st.session_state.dir_home,'api_cost','api_cost.yaml'), is_real_run=True) if total_cost: st.success(f":money_with_wings: This run cost :heavy_dollar_sign:{total_cost:.4f}") # Format the JSON string for display. if last_JSON_response is None: st.markdown(f"Last JSON object in the batch: NONE") else: try: formatted_json = json.dumps(json.loads(last_JSON_response), indent=4, sort_keys=False) except: formatted_json = json.dumps(last_JSON_response, indent=4, sort_keys=False) st.markdown(f"Last JSON object in the batch:\n```\n{formatted_json}\n```") st.balloons() else: st.button("Start Processing", type='primary', disabled=True) st.error(":heavy_exclamation_mark: Required API keys not set. Please visit the 'API Keys' tab and set the Google Vision OCR API key and at least one LLM key.") st.button("Refresh", on_click=refresh) with col_run_2: if st.button("Test GPT"): progress_report.set_n_overall(TestOptionsGPT.get_length()) test_results, JSON_results = run_demo_tests_GPT(progress_report) with col_test: display_test_results(test_results, JSON_results, 'gpt') st.balloons() if st.button("Test PaLM2"): progress_report.set_n_overall(TestOptionsPalm.get_length()) test_results, JSON_results = run_demo_tests_Palm(progress_report) with col_test: display_test_results(test_results, JSON_results, 'palm') st.balloons() with col_run_3: st.subheader('Check GPU') if st.button("GPU"): success, info = test_GPU() if success: st.balloons() for message in info: st.success(message) else: for message in info: st.error(message) def content_tab_settings(): st.header('Project') col_project_1, col_project_2 = st.columns([4,2]) st.write("---") st.header('Input Images') col_local_1, col_local_2 = st.columns([4,2]) st.write("---") st.header('LeafMachine2 Label Collage') col_cropped_1, col_cropped_2 = st.columns([4,4]) st.write("---") st.header('OCR Overlay Image') col_ocr_1, col_ocr_2 = st.columns([4,4]) os.path.join(st.session_state.dir_home, ) ### Project with col_project_1: st.session_state.config['leafmachine']['project']['run_name'] = st.text_input("Run name", st.session_state.config['leafmachine']['project'].get('run_name', '')) st.session_state.config['leafmachine']['project']['dir_output'] = st.text_input("Output directory", st.session_state.config['leafmachine']['project'].get('dir_output', '')) ### Input Images Local with col_local_1: st.session_state.config['leafmachine']['project']['dir_images_local'] = st.text_input("Input images directory", st.session_state.config['leafmachine']['project'].get('dir_images_local', '')) st.session_state.config['leafmachine']['project']['continue_run_from_partial_xlsx'] = st.text_input("Continue run from partially completed project XLSX", st.session_state.config['leafmachine']['project'].get('continue_run_from_partial_xlsx', ''), disabled=True) st.write("---") st.subheader('LLM Version') st.markdown( """ ***Note:*** GPT-4 is 20x more expensive than GPT-3.5 """ ) st.session_state.config['leafmachine']['LLM_version'] = st.selectbox("LLM version", ["gpt-4-1106-preview", "GPT 4", "GPT 3.5", "Azure GPT 4", "Azure GPT 3.5", "PaLM 2"], index=["gpt-4-1106-preview", "GPT 
4", "GPT 3.5", "Azure GPT 4", "Azure GPT 3.5", "PaLM 2"].index(st.session_state.config['leafmachine'].get('LLM_version', 'Azure GPT 4'))) st.write("---") st.subheader('Prompt Version') versions, default_version = get_prompt_versions(st.session_state.config['leafmachine']['LLM_version']) if versions: selected_version = st.session_state.config['leafmachine']['project'].get('prompt_version', default_version) if selected_version not in versions: selected_version = default_version st.session_state.config['leafmachine']['project']['prompt_version'] = st.selectbox("Prompt Version", versions, index=versions.index(selected_version)) with col_cropped_1: default_crops = st.session_state.config['leafmachine']['cropped_components'].get('save_cropped_annotations', ['leaf_whole']) st.write("Prior to transcription, use LeafMachine2 to crop all labels from input images to create label collages for each specimen image. (Requires GPU)") st.session_state.config['leafmachine']['use_RGB_label_images'] = st.checkbox("Use LeafMachine2 label collage for transcriptions", st.session_state.config['leafmachine'].get('use_RGB_label_images', False)) st.session_state.config['leafmachine']['cropped_components']['save_cropped_annotations'] = st.multiselect("Components to crop", ['ruler', 'barcode','label', 'colorcard','map','envelope','photo','attached_item','weights', 'leaf_whole', 'leaf_partial', 'leaflet', 'seed_fruit_one', 'seed_fruit_many', 'flower_one', 'flower_many', 'bud','specimen','roots','wood'],default=default_crops) with col_cropped_2: ba = os.path.join(st.session_state.dir_home,'demo', 'ba','ba2.png') image = Image.open(ba) st.image(image, caption='LeafMachine2 Collage', output_format = "PNG") with col_ocr_1: st.write('This will plot bounding boxes around all text that Google Vision was able to detect. If there are no boxes around text, then the OCR failed, so that missing text will not be seen by the LLM when it is creating the JSON object. 
The created image will be viewable in the VoucherVisionEditor.') st.session_state.config['leafmachine']['do_create_OCR_helper_image'] = st.checkbox("Create image showing an overlay of the OCR detections", st.session_state.config['leafmachine'].get('do_create_OCR_helper_image', False)) with col_ocr_2: ocr = os.path.join(st.session_state.dir_home,'demo', 'ba','ocr.png') image_ocr = Image.open(ocr) st.image(image_ocr, caption='OCR Overlay Images', output_format = "PNG") def content_tab_component(): st.header('Archival Components') ACD_version = st.selectbox("Archival Component Detector (ACD) Version", ["Version 2.1", "Version 2.2"]) ACD_confidence_default = int(st.session_state.config['leafmachine']['archival_component_detector']['minimum_confidence_threshold'] * 100) ACD_confidence = st.number_input("ACD Confidence Threshold (%)", min_value=0, max_value=100,value=ACD_confidence_default) st.session_state.config['leafmachine']['archival_component_detector']['minimum_confidence_threshold'] = float(ACD_confidence/100) st.session_state.config['leafmachine']['archival_component_detector']['do_save_prediction_overlay_images'] = st.checkbox("Save Archival Prediction Overlay Images", st.session_state.config['leafmachine']['archival_component_detector'].get('do_save_prediction_overlay_images', True)) st.session_state.config['leafmachine']['archival_component_detector']['ignore_objects_for_overlay'] = st.multiselect("Hide Archival Components in Prediction Overlay Images", ['ruler', 'barcode','label', 'colorcard','map','envelope','photo','attached_item','weights',], default=[]) # Depending on the selected version, set the configuration if ACD_version == "Version 2.1": st.session_state.config['leafmachine']['archival_component_detector']['detector_type'] = 'Archival_Detector' st.session_state.config['leafmachine']['archival_component_detector']['detector_version'] = 'PREP_final' st.session_state.config['leafmachine']['archival_component_detector']['detector_iteration'] = 'PREP_final' st.session_state.config['leafmachine']['archival_component_detector']['detector_weights'] = 'best.pt' elif ACD_version == "Version 2.2": #TODO update this to version 2.2 st.session_state.config['leafmachine']['archival_component_detector']['detector_type'] = 'Archival_Detector' st.session_state.config['leafmachine']['archival_component_detector']['detector_version'] = 'PREP_final' st.session_state.config['leafmachine']['archival_component_detector']['detector_iteration'] = 'PREP_final' st.session_state.config['leafmachine']['archival_component_detector']['detector_weights'] = 'best.pt' def content_tab_processing(): st.header('Processing Options') col_processing_1, col_processing_2 = st.columns([2,2,]) with col_processing_1: st.subheader('Compute Options') st.session_state.config['leafmachine']['project']['num_workers'] = st.number_input("Number of CPU workers", value=st.session_state.config['leafmachine']['project'].get('num_workers', 1), disabled=True) st.session_state.config['leafmachine']['project']['batch_size'] = st.number_input("Batch size", value=st.session_state.config['leafmachine']['project'].get('batch_size', 500), help='Sets the batch size for the LeafMachine2 cropping. 
If computer RAM is filled, lower this value to ~100.') with col_processing_2: st.subheader('Misc') st.session_state.config['leafmachine']['project']['prefix_removal'] = st.text_input("Remove prefix from catalog number", st.session_state.config['leafmachine']['project'].get('prefix_removal', '')) st.session_state.config['leafmachine']['project']['suffix_removal'] = st.text_input("Remove suffix from catalog number", st.session_state.config['leafmachine']['project'].get('suffix_removal', '')) st.session_state.config['leafmachine']['project']['catalog_numerical_only'] = st.checkbox("Require 'Catalog Number' to be numerical only", st.session_state.config['leafmachine']['project'].get('catalog_numerical_only', True)) ### Logging and Image Validation - col_v1 st.header('Logging and Image Validation') col_v1, col_v2 = st.columns(2) with col_v1: st.session_state.config['leafmachine']['do']['check_for_illegal_filenames'] = st.checkbox("Check for illegal filenames", st.session_state.config['leafmachine']['do'].get('check_for_illegal_filenames', True)) st.session_state.config['leafmachine']['do']['check_for_corrupt_images_make_vertical'] = st.checkbox("Check for corrupt images", st.session_state.config['leafmachine']['do'].get('check_for_corrupt_images_make_vertical', True)) st.session_state.config['leafmachine']['print']['verbose'] = st.checkbox("Print verbose", st.session_state.config['leafmachine']['print'].get('verbose', True)) st.session_state.config['leafmachine']['print']['optional_warnings'] = st.checkbox("Show optional warnings", st.session_state.config['leafmachine']['print'].get('optional_warnings', True)) with col_v2: log_level = st.session_state.config['leafmachine']['logging'].get('log_level', None) log_level_display = log_level if log_level is not None else 'default' selected_log_level = st.selectbox("Logging Level", ['default', 'DEBUG', 'INFO', 'WARNING', 'ERROR'], index=['default', 'DEBUG', 'INFO', 'WARNING', 'ERROR'].index(log_level_display)) if selected_log_level == 'default': st.session_state.config['leafmachine']['logging']['log_level'] = None else: st.session_state.config['leafmachine']['logging']['log_level'] = selected_log_level def content_tab_domain(): st.header('Embeddings Database') col_emb_1, col_emb_2 = st.columns([4,2]) with col_emb_1: st.markdown( """ VoucherVision includes the option of using domain knowledge inside of the dynamically generated prompts. The OCR text is queried against a database of existing label transcriptions. The most similar existing transcriptions act as an example of what the LLM should emulate and are shown to the LLM as JSON objects. VoucherVision uses cosine similarity search to return the most similar existing transcription. - Note: Using domain knowledge may increase the chance that foreign text is included in the final transcription - Disabling this feature will show the LLM multiple examples of an empty JSON skeleton structure instead - Enabling this option requires a GPU with at least 8GB of VRAM - The domain knowledge files can be located in the directory "../VoucherVision/domain_knowledge". On first run the embeddings database must be created, which takes time. If the database creation runs each time you use VoucherVision, then something is wrong. 
""" ) st.write(f"Domain Knowledge is only available for the following prompts:") for available_prompts in PROMPTS_THAT_NEED_DOMAIN_KNOWLEDGE: st.markdown(f"- {available_prompts}") if st.session_state.config['leafmachine']['project']['prompt_version'] in PROMPTS_THAT_NEED_DOMAIN_KNOWLEDGE: st.session_state.config['leafmachine']['project']['use_domain_knowledge'] = st.checkbox("Use domain knowledge", True, disabled=True) else: st.session_state.config['leafmachine']['project']['use_domain_knowledge'] = st.checkbox("Use domain knowledge", False, disabled=True) st.write("") if st.session_state.config['leafmachine']['project']['use_domain_knowledge']: st.session_state.config['leafmachine']['project']['embeddings_database_name'] = st.text_input("Embeddings database name (only use underscores)", st.session_state.config['leafmachine']['project'].get('embeddings_database_name', '')) st.session_state.config['leafmachine']['project']['build_new_embeddings_database'] = st.checkbox("Build *new* embeddings database", st.session_state.config['leafmachine']['project'].get('build_new_embeddings_database', False)) st.session_state.config['leafmachine']['project']['path_to_domain_knowledge_xlsx'] = st.text_input("Path to domain knowledge CSV file (will be used to create new embeddings database)", st.session_state.config['leafmachine']['project'].get('path_to_domain_knowledge_xlsx', '')) else: st.session_state.config['leafmachine']['project']['embeddings_database_name'] = st.text_input("Embeddings database name (only use underscores)", st.session_state.config['leafmachine']['project'].get('embeddings_database_name', ''), disabled=True) st.session_state.config['leafmachine']['project']['build_new_embeddings_database'] = st.checkbox("Build *new* embeddings database", st.session_state.config['leafmachine']['project'].get('build_new_embeddings_database', False), disabled=True) st.session_state.config['leafmachine']['project']['path_to_domain_knowledge_xlsx'] = st.text_input("Path to domain knowledge CSV file (will be used to create new embeddings database)", st.session_state.config['leafmachine']['project'].get('path_to_domain_knowledge_xlsx', ''), disabled=True) def render_expense_report_summary(): expense_summary = st.session_state.expense_summary expense_report = st.session_state.expense_report st.header('Expense Report Summary') if expense_summary: st.metric(label="Total Cost", value=f"${round(expense_summary['total_cost_sum'], 4):,}") col1, col2 = st.columns(2) # Run count and total costs with col1: st.metric(label="Run Count", value=expense_summary['run_count']) st.metric(label="Tokens In", value=f"{expense_summary['tokens_in_sum']:,}") # Token information with col2: st.metric(label="Total Images", value=expense_summary['n_images_sum']) st.metric(label="Tokens Out", value=f"{expense_summary['tokens_out_sum']:,}") # Calculate cost proportion per image for each API version st.subheader('Average Cost per Image by API Version') cost_labels = [] cost_values = [] total_images = 0 cost_per_image_dict = {} # Iterate through the expense report to accumulate costs and image counts for index, row in expense_report.iterrows(): api_version = row['api_version'] total_cost = row['total_cost'] n_images = row['n_images'] total_images += n_images # Keep track of total images processed if api_version not in cost_per_image_dict: cost_per_image_dict[api_version] = {'total_cost': 0, 'n_images': 0} cost_per_image_dict[api_version]['total_cost'] += total_cost cost_per_image_dict[api_version]['n_images'] += n_images api_versions = 
list(cost_per_image_dict.keys()) colors = [COLORS_EXPENSE_REPORT[version] if version in COLORS_EXPENSE_REPORT else '#DDDDDD' for version in api_versions] # Calculate the cost per image for each API version for version, cost_data in cost_per_image_dict.items(): total_cost = cost_data['total_cost'] n_images = cost_data['n_images'] # Calculate the cost per image for this version cost_per_image = total_cost / n_images if n_images > 0 else 0 cost_labels.append(version) cost_values.append(cost_per_image) # Generate the pie chart cost_pie_chart = go.Figure(data=[go.Pie(labels=cost_labels, values=cost_values, hole=.3)]) # Update traces for custom text in hoverinfo, displaying cost with a dollar sign and two decimal places cost_pie_chart.update_traces( marker=dict(colors=colors), text=[f"${value:.2f}" for value in cost_values], # Formats the cost as a string with a dollar sign and two decimals textinfo='percent+label', hoverinfo='label+percent+text' # Adds custom text (formatted cost) to the hover information ) st.plotly_chart(cost_pie_chart, use_container_width=True) st.subheader('Proportion of Total Cost by API Version') cost_labels = [] cost_proportions = [] total_cost_by_version = {} # Sum the total cost for each API version for index, row in expense_report.iterrows(): api_version = row['api_version'] total_cost = row['total_cost'] if api_version not in total_cost_by_version: total_cost_by_version[api_version] = 0 total_cost_by_version[api_version] += total_cost # Calculate the combined total cost for all versions combined_total_cost = sum(total_cost_by_version.values()) # Calculate the proportion of total cost for each API version for version, total_cost in total_cost_by_version.items(): proportion = (total_cost / combined_total_cost) * 100 if combined_total_cost > 0 else 0 cost_labels.append(version) cost_proportions.append(proportion) # Generate the pie chart cost_pie_chart = go.Figure(data=[go.Pie(labels=cost_labels, values=cost_proportions, hole=.3)]) # Update traces for custom text in hoverinfo cost_pie_chart.update_traces( marker=dict(colors=colors), text=[f"${cost:.2f}" for cost in total_cost_by_version.values()], # This will format the cost to 2 decimal places textinfo='percent+label', hoverinfo='label+percent+text' # This tells Plotly to show the label, percent, and custom text (cost) on hover ) st.plotly_chart(cost_pie_chart, use_container_width=True) # API version usage percentages pie chart st.subheader('Runs by API Version') api_versions = list(expense_summary['api_version_percentages'].keys()) percentages = [expense_summary['api_version_percentages'][version] for version in api_versions] pie_chart = go.Figure(data=[go.Pie(labels=api_versions, values=percentages, hole=.3)]) pie_chart.update_layout(margin=dict(t=0, b=0, l=0, r=0)) pie_chart.update_traces(marker=dict(colors=colors),) st.plotly_chart(pie_chart, use_container_width=True) else: st.error('No expense report data available.') def sidebar_content(): if not os.path.exists(os.path.join(st.session_state.dir_home,'expense_report')): validate_dir(os.path.join(st.session_state.dir_home,'expense_report')) expense_report_path = os.path.join(st.session_state.dir_home, 'expense_report', 'expense_report.csv') if os.path.exists(expense_report_path): # File exists, proceed with summarization
st.session_state.expense_summary, st.session_state.expense_report = summarize_expense_report(expense_report_path)
12
2023-10-30 23:25:20+00:00
16k
medsagou/massar-direction-sagoubot
main.py
[ { "identifier": "C_File", "path": "utilities/Class_Files.py", "snippet": "class C_File():\n #____________________________________________________________________________________________________________________________________________________________\n # Le constructeur d'une instance d'un fichier\n # Ce constructeur permet d'attribuer à une instance de fichier son nom (vide par défaut) \n # Ce constructeur permet de spécifier le séparateur des éléments s'il existe (également vide par défauté)su\n # Un séparateur peut être un \";\", une \",\" un \"#', etc. \n def __init__(self,file_name=\"\",sep=\";\", sep2=\"+\"):\n self.nomFichier=file_name\n self.separateur=sep\n self.separateur2=sep2\n \n #____________________________________________________________________________________________________________________________________________________________\n # Vérifie si un fichier exite ou non.\n def existe_fichier(self):\n if os.path.isfile(self.nomFichier):\n return True\n else:\n return False\n #____________________________________________________________________________________________________________________________________________________________\n # Vérifie si un fichier exite ou non.\n def specifier_Nom_fichier(self):\n while True:\n print(\"\\n\")\n print(\"Instanciation et saisie d'un nouveau fichier de travail :\\n\")\n self.nomFichier=input(\"Entrez le chemin de votre fichier : \"+\"\\n\")\n if self.existe_fichier():\n print(\"le fichier spécifié existe déjà dans le répertoire courant, veuillez recommencer\")\n else:\n break \n #____________________________________________________________________________________________________________________________________________________________\n # Créer un fichier vide sans supprimer le fichier de même nom s'il existe\n def create_file(self):\n f = open(self.nomFichier,\"x\") #Création d'un fichier vide. Ici, le fichier n'est pas écrasé contrairement au mode 'w' \n f.close()\n \n #____________________________________________________________________________________________________________________________________________________________\n # Créer un fichier vide avec suppression du fichier de même nom s'il existe\n def create_file_2(self):\n f = open(self.nomFichier,\"w\") #Création d'un fichier vide. Ici, le fichier existant qui porte le même nom est écrasé contrairement mode 'x' \n f.close()\n \n #____________________________________________________________________________________________________________________________________________________________\n # Créer un fichier vide avec possibilité de dialogue avant de supprimer un fichier de même nom s'il existe dans le même répertoire (dossier)\n def creer_fichier_3(self):\n if os.path.exists(self.nomFichier): # Condition pour vérifier si jamais le fichier à créer existe déjà dans le répertoire courant\n print(\"Il existe un fichier qui porte le même nom\"+\"\\n\")\n print(\"Voulez-vous l'écraser ?\")\n while True: # Itération (boucle infinie) pour prévenir les événetuelles erreurs de frappe (autre chose que '1' et '2') (Attention, il faut absolument provoquer quelque part dans la boucle une rupture avec \"break\" )\n # Menu local pour exposer les dexu cas de figures (on peut également créer une instance de la classe Menu ici)\n print(\"Veuillez choisir ce qu'il faut faire, selon les options suivantes : \"+\"\\n\")\n print(\"1. Ecraser le fichier existant\")\n print(\"2. 
Garder le fichier\")\n rep=input(\"Veuillez taper 1 ou 2 \")\n if rep=='1': # Cas où l'utilisateur choisit d'écraser le fichier existant \n self.creer_fichier_2() # Appel à laméthode creer_fichier_2()\n break # rupture de la boucle d'itération => on sort de la boucle infinie while\n elif rep=='2': # Cas où l'utilisateur choisit de ne pas écraser le fichier existant (pas besoin dans ce cas de faire appel à la méthode creer_fichier_1()) \n break # rupture de la boucle d'itération => on sort de la boucle infinie while\n else: # cas où l'utilisateur n'a tapé ni \"1\", ni\"2\"\n print(\"Erreur de frappe\"+\"\\n\")\n else: # cas où le fichier à créer n'existe pas dans le répertoire courant\n self.creer_fichier_1() # Appel à laméthode creer_fichier_1()\n \n #____________________________________________________________________________________________________________________________________________________________\n def ActiverFichier(self,Message):\n print(Message)\n self.specifier_Nom_fichier()\n self.creer_fichier_3() \n \n #____________________________________________________________________________________________________________________________________________________________\n # Supprimer un fichier\n def supprimer_fichier(self):\n if os.path.exists(self.nomFichier): # Condition pour vérifier si jamais le fichier à créer existe déjà dans le répertoire courant\n os.remove(self.nomFichier)\n print(\"Le fichier a été supprimé\")\n else:\n print(\"Le fichier spécifié n'existe pas dans le répertoire courant\")\n\n #____________________________________________________________________________________________________________________________________________________________\n # Ajouter un élément\n def enregistrer_Element(self,Element):\n with open(self.nomFichier,'a') as F: # Ouverture du fichier en mode lecture.\n F.write(Element)\n\n #____________________________________________________________________________________________________________________________________________________________\n # Ajouter un ensemble d'éléments sous forme de liste\n def Liste_to_Fichier(self,Liste): # 'creer_Fichier_Avec_Liste_Elements(self,ListeElements)' Créer d'un fichier à partir d'une liste : chaque élément de la liste représente une ligne du fichier\n with open(self.nomFichier,'w') as F: # Ouverture du fichier en mode écriture : à ce niveau si le fichier existe il va être écrasé\n F.writelines(Liste)\n def Liste_stript(self, L):\n for i in range(len(L)):\n L[i] = L[i].strip()\n return L\n\n def str_to_fichier(self,string):\n with open(self.nomFichier,'a') as F: # Ouverture du fichier en mode écriture : à ce niveau si le fichier existe il va être écrasé\n F.write(string)\n F.write(\"\\n\")\n return\n def str_to_fichier2(self,string):\n with open(self.nomFichier,'w') as F:\n F.write(string)\n F.write(\"\\n\")\n return\n\n def dict_to_file(self, D):\n if type(D) == dict and D != {}:\n with open(self.nomFichier, 'w') as F:\n for c, v in D.items():\n F.write(str(c) + \";\" + str(v))\n F.write(\"\\n\")\n return True\n else:\n print_error(\"WE HAD A PROBLEM WHILE SAVING YOUR DICT\", console=self.console)\n \n def Liste_to_str_to_Fichier(self,Liste_1): \n Liste = self.Liste_to_Str1(Liste_1)\n with open(self.nomFichier,'a') as F: # Ouverture du fichier en mode écriture : à ce niveau si le fichier existe il va être écrasé\n \n F.writelines(Liste) \n F.writelines('\\n')\n 
#____________________________________________________________________________________________________________________________________________________________\n # Lire le contenu d'un fichier et le retourne en le plaçant dans une liste\n def fichier_to_Liste(self): # extration d'une liste depuis un fichier : chaque ligne du fichier représente un élément de cette liste\n with open(self.nomFichier, 'r') as f: # Ouverture du fichier en mode lecture.\n return f.readlines()\n def Fichier_to_str(self):\n with open (self.nomFichier,'r') as f:\n return f.read()\n\n def supprimer_element(self,element):\n ch = self.Fichier_to_str()\n print(ch)\n chh = ch.replace(element,'')\n print(chh)\n self.str_to_fichier(ch)\n \n #____________________________________________________________________________________________________________________________________________________________\n # Afficher un fichier ligne par ligne\n def afficher_lignes_fichier(self):\n print(\"\\n Affichage des lignes du fichier \\n\")\n with open(self.nomFichier, 'r') as F:\n for ligne in F:\n print (ligne) \n print(\"\\n Fin affichage des lignes du fichier\")\n\n #____________________________________________________________________________________________________________________________________________________________\n # Afficher un fichier ligne par ligne et pour chaque ligne mot par mot\n def afficher_mots_fichier(self):\n i=0 # uttiliser comme un simple compteur pour afficher dans un message afin de le rendre plus explicite\n with open(self.nomFichier, 'r') as F:\n for ligne in F:\n i+=1\n print(\"Affichage des éléments du contenu la ligne : \",i,\"\\n\") # message explicite\n L=C_Liste(ligne.split(self.separateur)) # Création d'une instance de la classe 'C_Liste'\n L.afficher_Liste() # ici on fait appel à la méthode 'afficher_Liste()' de la classe 'C_Liste'\n\n\n def existe_element_fichier(self,Element):\n Liste_Lignes_du_Fichier=self.Fichier_to_Liste() # extraire_liste(nomFichier)\n if Liste_Lignes_du_Fichier!=[]:\n for i in range(len(Liste_Lignes_du_Fichier)):\n if Element in Liste_Lignes_du_Fichier[i]:\n return(True)\n return(False)\n \n \n def existe_element_fichier2(self,element):\n Liste_Lignes_du_Fichier=self.Fichier_to_Liste() # extraire_liste(nomFichier)\n if Liste_Lignes_du_Fichier!=[]:\n for i in range(len(Liste_Lignes_du_Fichier)):\n L=Liste_Lignes_du_Fichier[i].split(self.separateur)\n if element in L:\n return(True)\n return(False)\n \n \n def existe_element_fichier3(self,element):\n Liste_Lignes_du_Fichier=self.Fichier_to_Liste() # extraire_liste(nomFichier)\n if Liste_Lignes_du_Fichier!=[]:\n for i in range(len(Liste_Lignes_du_Fichier)):\n L=Liste_Lignes_du_Fichier[i].split(self.separateur)\n if element in L:\n return(True, Liste_Lignes_du_Fichier[i])\n return(False,False)\n\n \n \n def modifier_element_fichier(self,Element):\n Nouvelle_Liste=[] # on commence par créer une nouvelle liste, inialisée à vide. 
Cette liste va nous servir à sauvegarder un \n Liste_Lignes_du_Fichier=self.Fichier_to_Liste() # extraire_liste(nomFichier)\n if Liste_Lignes_du_Fichier!=[]:\n for i in range(len(Liste_Lignes_du_Fichier)):\n Ligne_Courante=Liste_Lignes_du_Fichier[i] # La variable 'Ligne_Courante' est utilisée pour donner plus de clarté sur le plan pédagogique, on peut à la place utiliser directement directement 'Liste_Lignes_du_Fichier[i]'\n Liste_Elements_Ligne_Courante=self.Str_to_List(Ligne_Courante) # Ici on transforme la chaîne de caractère 'Ligne_Courante' en une liste 'Liste_Elements_Ligne_Courante' \n if Element not in Liste_Elements_Ligne_Courante:\n Nouvelle_Liste=Nouvelle_Liste+[Ligne_Courante+'\\n']\n else:\n Nouvelle_Liste=C_Liste(Liste_Elements_Ligne_Courante) # Nouvelle_Liste est une instance de la classe C_Liste\n Nouvelle_Liste_Elements=Nouvelle_Liste.changer_element(Element)\n Nouvelle_Ligne_Modifiee=self.Liste_to_Str(Nouvelle_Liste_Elements)\n Nouvelle_Liste=Nouvelle_Liste+[Nouvelle_Ligne_Modifiee+'\\n'] \n self.Liste_to_Fichier(Nouvelle_Liste) # creer_Fichier_depuis_Liste(nomFichier,Nouvelle_Liste)\n \n def ajouter_a_la_fin_de_la_ligne(self,ID,Element,sep):\n Nouvelle_Liste=[] # on commence par créer une nouvelle liste, inialisée à vide. Cette liste va nous servir à sauvegarder un \n Liste_Lignes_du_Fichier=self.Fichier_to_Liste() # extraire_liste(nomFichier)\n if Liste_Lignes_du_Fichier!=[]:\n for i in range(len(Liste_Lignes_du_Fichier)):\n Ligne_Courante=Liste_Lignes_du_Fichier[i] # La variable 'Ligne_Courante' est utilisée pour donner plus de clarté sur le plan pédagogique, on peut à la place utiliser directement directement 'Liste_Lignes_du_Fichier[i]'\n Liste_Elements_Ligne_Courante=self.str_to_liste(Ligne_Courante) # Ici on transforme la chaîne de caractère 'Ligne_Courante' en une liste 'Liste_Elements_Ligne_Courante' \n if ID not in Liste_Elements_Ligne_Courante:\n Nouvelle_Liste=Nouvelle_Liste+[Ligne_Courante+'\\n']\n else:\n Liste_Elements_Ligne_Courante[-1] = Liste_Elements_Ligne_Courante[-1].replace('\\n','') +sep+ str(Element)\n \n Nouvelle_Liste_Elements=Liste_Elements_Ligne_Courante\n Nouvelle_Ligne_Modifiee=self.Liste_to_Str1(Nouvelle_Liste_Elements)\n Nouvelle_Liste=Nouvelle_Liste+[Nouvelle_Ligne_Modifiee+'\\n'] \n self.Liste_to_Fichier(Nouvelle_Liste) # creer_Fichier_depuis_Liste(nomFichier,Nouvelle_Liste)\n \n \n def Liste_to_Str1(self,Liste_Elements):\n return self.separateur.join(map(str, Liste_Elements))\n \n def Liste_to_Str2(self,Liste_Elements):\n return self.separateur2.join(Liste_Elements)\n \n def supprimer_element_fichier(self,Element):\n Nouvelle_Liste=[] # on commence par créer une nouvelle liste, inialisée à vide. Cette liste va nous servir à sauvegarder un \n# erreur d'écriture Liste_Lignes_du_Fichier=Fichier_to_Liste(self) # extraire_liste(nomFichier)\n Liste_Lignes_du_Fichier=self.Fichier_to_Liste() # extraire_liste(nomFichier)\n if Liste_Lignes_du_Fichier!=[]:\n for i in range(len(Liste_Lignes_du_Fichier)):\n if Element not in Liste_Lignes_du_Fichier[i]:\n Nouvelle_Liste=Nouvelle_Liste+[Liste_Lignes_du_Fichier[i]+'\\n']\n# écriture erronée Liste_to_Fichier(self.nomFichier,Nouvelle_Liste) # creer_Fichier_depuis_Liste(nomFichier,Nouvelle_Liste)\n self.Liste_to_Fichier(Nouvelle_Liste) # creer_Fichier_depuis_Liste(nomFichier,Nouvelle_Liste)\n \n def supprimer_ligne_fichier(self,Element_ligne):\n Nouvelle_Liste=[] # on commence par créer une nouvelle liste, inialisée à vide. 
Cette liste va nous servir à sauvegarder un \n# erreur d'écriture Liste_Lignes_du_Fichier=Fichier_to_Liste(self) # extraire_liste(nomFichier)\n Liste_Lignes_du_Fichier=self.Fichier_to_Liste() # extraire_liste(nomFichier)\n if Liste_Lignes_du_Fichier!=[]:\n for i in range(len(Liste_Lignes_du_Fichier)):\n if Element_ligne not in Liste_Lignes_du_Fichier[i]:\n Nouvelle_Liste=Nouvelle_Liste+[Liste_Lignes_du_Fichier[i]]\n else:\n continue\n# écriture erronée Liste_to_Fichier(self.nomFichier,Nouvelle_Liste) # creer_Fichier_depuis_Liste(nomFichier,Nouvelle_Liste)\n self.Liste_to_Fichier(Nouvelle_Liste) # creer_Fichier_depuis_Liste(nomFichier,Nouvelle_Liste)\n \n def supprimer_ligne_fichier2(self,Element_ligne):\n Nouvelle_Liste=[] # on commence par créer une nouvelle liste, inialisée à vide. Cette liste va nous servir à sauvegarder un \n# erreur d'écriture Liste_Lignes_du_Fichier=Fichier_to_Liste(self) # extraire_liste(nomFichier)\n Liste_Lignes_du_Fichier=self.Fichier_to_Liste() # extraire_liste(nomFichier)\n if Liste_Lignes_du_Fichier!=[]:\n for i in range(len(Liste_Lignes_du_Fichier)):\n if Element_ligne+\"\\n\" not in Liste_Lignes_du_Fichier[i].split(self.separateur)[-1].split(self.separateur2) and Element_ligne not in Liste_Lignes_du_Fichier[i].split(self.separateur)[-1].split(self.separateur2):\n Nouvelle_Liste=Nouvelle_Liste+[Liste_Lignes_du_Fichier[i]]\n else:\n continue\n# écriture erronée Liste_to_Fichier(self.nomFichier,Nouvelle_Liste) # creer_Fichier_depuis_Liste(nomFichier,Nouvelle_Liste)\n self.Liste_to_Fichier(Nouvelle_Liste) #\n \n def modiffier_ligne(self,Element_ligne,nv_ligne):\n Nouvelle_Liste=[] \n Liste_Lignes_du_Fichier=self.Fichier_to_Liste() \n if Liste_Lignes_du_Fichier!=[]:\n for i in range(len(Liste_Lignes_du_Fichier)):\n if Element_ligne not in Liste_Lignes_du_Fichier[i]:\n Nouvelle_Liste=Nouvelle_Liste+[Liste_Lignes_du_Fichier[i]]\n else:\n Nouvelle_Liste = Nouvelle_Liste+[nv_ligne + '\\n']\n self.Liste_to_Fichier(Nouvelle_Liste) #\n return\n \n \n\n def str_to_liste(self, string):\n return string.split(self.separateur)\n \n \n def nbre_ligne(self):\n return len(self.Fichier_to_Liste())\n \n\n def str_to_liste2(self, string):\n return string.split(self.separateur2)" }, { "identifier": "C_Dossier", "path": "utilities/Class_Files.py", "snippet": "class C_Dossier():\n\n \n def __init__(self,sep=\"\"):\n self.separateur=sep\n \n def dossier_courant(self):\n return os.getcwd()\n\n def existe_dossier(self,Chemin):\n if os.path.exists(Chemin) :\n return True\n else:\n return False \n \n def changer_dossier(self,Chemin):\n if C_Dossier.existe_dossier(Chemin):\n return(chdir(Chemin))\n \n \n def creer_dossier(self,Chemin):\n if not C_Dossier.existe_dossier(Chemin):\n return(mkdir(Chemin))" }, { "identifier": "Read_Db", "path": "absence_app/Read_XLSB_File.py", "snippet": "class Read_Db:\n def __init__(self, input_file = r\"data_to_manage/file_data.xls\", template_file = \"data_to_manage/template.xlsx\", output_file = \"data_to_manage/absence.xlsx\", df = \"\", required_classes=[], progress_bar=\"\", console=\"\"):\n self.index = {0: \"CLASS_StudentIndex\",\n 1: \"Niveau\",\n 2: \"class_name\",\n 3: \"student_index\",\n \"Unnamed: 23\": \"CNE\",\n \"Unnamed: 12\": \"nom\",\n \"Unnamed: 16\": \"prenom\"}\n self.input_file = input_file\n self.output_file = output_file\n self.template_file = template_file\n self.df = df\n self.init_cell = [\"A\"]\n self.start_col = 'A'\n self.end_col = 'C'\n # self.workbook_output = self.get_workbook(output_file)\n self.workbook_output = \"\"\n 
self.required_classes = required_classes\n self.progress_bar = progress_bar\n self.console = console\n\n def get_key(self, val):\n for key, value in self.index.items():\n if val == value:\n return key\n return \"key doesn't exist\"\n\n def get_data_from_xlsb(self):\n xlsb_file = pd.ExcelFile(self.input_file)\n df = xlsb_file.parse('Feuil3', header=None) #\n self.df = df\n return df\n def get_df_from_xls(self):\n xls = pd.ExcelFile(self.input_file)\n workbook = self.get_data_from_xls()\n sheet_names = xls.sheet_names\n data = {}\n for sheet_name in sheet_names:\n sheet = workbook[sheet_name]\n df = pd.read_excel(self.input_file, sheet_name=sheet_name)\n class_name = sheet.cell_value(10, 8)\n data[class_name] = df\n self.df = data\n return data\n\n def get_data_from_xls(self): # new data function\n return xlrd.open_workbook(self.input_file)\n def get_classes_name_from_xls(self):\n workbook = self.get_data_from_xls()\n classes = []\n sheet_names = workbook.sheet_names()\n for sheet_name in sheet_names:\n sheet = workbook[sheet_name]\n class_name = sheet.cell_value(10, 8)\n # print(class_name)\n classes.append(class_name)\n return classes\n\n def get_workbook(self, file_name):\n workbook = openpyxl.load_workbook(file_name)\n return workbook\n\n\n def get_workbook_sheet(self, workbook ,sheet):\n return workbook[sheet]\n\n def add_value_to_sheet(self, worksheet, cell, value):\n cell_to_update = worksheet[cell]\n cell_to_update.value = value\n return\n\n\n def create_copy_sheet(self, class_name = \"\", workbook = \"\", source_sheet = \"\"):\n new_sheet = workbook.copy_worksheet(source_sheet)\n new_sheet.title = class_name\n new_sheet.sheet_view.rightToLeft = True\n return\n\n\n def get_column_list_from_df(self, column_key):\n if self.df == \"\":\n self.get_df_from_xls()\n\n L = list(set(self.df.values[:, column_key].tolist()))\n try:\n L.remove(\"0\")\n except ValueError:\n pass\n try:\n L.remove(0)\n except ValueError:\n pass\n return L\n def restart_workbook_output(self):\n self.workbook_output.close()\n self.workbook_output = self.get_workbook(self.output_file)\n return\n def get_sheet_names_workbout_output(self):\n self.workbook_output = self.get_workbook(self.output_file)\n return self.workbook_output.sheetnames\n\n\n\n\n def create_all_class_sheet(self):\n if check_exist_file(self.output_file):\n # class_in_sheet = self.get_sheet_names_workbout_output()\n # with open(self.output_file, 'w') as f:\n # f.close()\n os.remove(self.output_file)\n print_info(\"WE REMOVED THE OUTPUT FILE TO CREATE NEW ONE\", console=self.console)\n # else:\n # class_in_sheet = []\n # classes_list = self.get_column_list_from_df(column_key=self.get_key(\"class_name\"))\n\n workbook = openpyxl.load_workbook(self.template_file)\n source_sheet = workbook[\"BaseSheet\"]\n classes_list = self.get_classes_name_from_xls()\n # print(classes_list)\n for classe in classes_list:\n # if classe in class_in_sheet:\n # print_error(f\"SHEET FOR {classe} ALREADY EXIST\")\n # continue\n # if not in college just skipit\n if classe.split(\"-\")[0][1:] not in self.required_classes:\n continue\n print_info(f\"CREATE A SHEET FOR {classe} CLASS\", console=self.console)\n if classe != \"\":\n self.create_copy_sheet(class_name=classe, workbook=workbook, source_sheet = source_sheet)\n\n workbook.save(str(self.output_file))\n workbook.close()\n return\n\n def fill_all_class_sheets(self):\n self.create_all_class_sheet()\n # already check above\n if str(self.df) == \"\":\n print_info(\"GETTING THE DATA...\", console=self.console)\n 
self.get_data_from_xls()\n # print_info(\"RESTARTING WORKSHEET\")\n # self.restart_workbook_output()\n self.workbook_output = self.get_workbook(self.output_file)\n class_in_sheet = list(self.get_sheet_names_workbout_output())\n # print(class_in_sheet)\n for k in range(len(class_in_sheet)):\n # print(f\"{k+1}/{len(class_in_sheet)}\")\n self.progress_bar.set((k+1)/len(class_in_sheet))\n worksheet = self.get_workbook_sheet(workbook = self.workbook_output, sheet=class_in_sheet[k])\n i = 0\n print_info(f\"WORKING ON {class_in_sheet[k]} CLASS DATA TO SHEET\", console=self.console)\n # column = db.df[\"3ASCG-5\"].columns.tolist()\n #\n # for index, row in db.df[\"3ASCG-5\"].iterrows():\n # if pd.isna(row[column[23]]):\n # continue\n # print(row[column[23]], row[column[16]], row[column[12]])\n index_student = 0\n self.get_df_from_xls()\n if class_in_sheet[k] == 'BaseSheet':\n continue\n for index, row in self.df[class_in_sheet[k]].iterrows():\n if pd.isna(row[self.get_key(\"CNE\")]):\n continue\n if index_student == 0:\n index_student += 1\n continue\n i += 1\n # print(row)\n for col in range(ord(self.start_col), ord(self.end_col) + 1):\n if chr(col) == \"A\":\n self.add_value_to_sheet(worksheet=worksheet, cell=chr(col) + str(9 + i), value=index_student)\n elif chr(col) == \"B\":\n self.add_value_to_sheet(worksheet=worksheet, cell=chr(col) + str(9 + i), value=row[self.get_key(\"CNE\")])\n elif chr(col) == \"C\":\n self.add_value_to_sheet(worksheet=worksheet, cell=chr(col) + str(9 + i),\n value=str(row[self.get_key(\"prenom\")] + \" \" + str(row[self.get_key(\"nom\")])))\n self.add_value_to_sheet(worksheet=worksheet, cell=\"BA\" + str(9 + i), value=str(row[self.get_key(\"prenom\")] + \" \" + str(row[self.get_key(\"nom\")])))\n if i > 49:\n return\n\n index_student += 1\n\n\n # add number of students\n self.add_value_to_sheet(worksheet=worksheet, cell=\"AO6\", value=str(i))\n # add class name\n self.add_value_to_sheet(worksheet=worksheet, cell=\"D6\", value=class_in_sheet[k])\n self.workbook_output.save(self.output_file)\n # self.workbook_output.close()\n print_success(\"Your lists is generated successfully\", console=self.console)\n print_success(f\"Your file path: {self.output_file}\", console=self.console)\n return" }, { "identifier": "Absence", "path": "absence_app/Absences.py", "snippet": "class Absence:\n def __init__(self, driver=\"\", console=\"\"):\n self.driver = driver\n self.console = console\n self.data_table_Xpath = \"/html/body/div/div[1]/div[2]/div[2]/section[2]/div[2]/div[1]/div/div/div[2]/div/form/div/div/div/div/div/div/div/div[2]/div/table\"\n self.data_table_reduced_Xpath = '//*[@id=\"DataTables-Table-0\"]/tbody'\n self.row_Xpath = '//*[@id=\"DataTables-Table-0\"]/tbody/tr['\n self.nome_Xpath = ']/td[3]'\n self.CNE_Xpath = ']/td[2]'\n self.select_Xpath = ']/td[4]/select'\n self.h_Xpath = ']/td['\n self.dates = \"\"\n self.searchBtn = self.driver.find_element(By.CSS_SELECTOR, \"#search > div > div > div > div.box-body > div.blocBtn > button\")\n self.saveBtnCssSelector = \"#gridFrom > button\"\n\n def get_list_page(self):\n try:\n self.driver.get(\"https://massar.men.gov.ma/Evaluation/Absence/AbsenceJournaliereParClasse\")\n except Exception as e:\n print_error(e, console=self.console)\n print_error(\"We Can't find the list page! 
Close the program and try again.\", console=self.console)\n else:\n print_info(\"GETTING TO THE LIST PAGE\", console=self.console)\n\n def get_classes_from_classes_page(self):\n return\n\n def main_absence_loop(self):\n TypeEnseignement = self.driver.find_element(By.ID, \"TypeEnseignement\")\n TypeEnseignement_all_options = TypeEnseignement.find_elements(By.TAG_NAME, \"option\")\n TypeEnseignement_Select = Select(TypeEnseignement)\n\n for TypeEnseignement_option in TypeEnseignement_all_options:\n try:\n WebDriverWait(self.driver, 5).until(\n EC.invisibility_of_element_located(\n (\n By.ID, \"loadingDiv\",\n )\n )\n )\n except Exception as e:\n print_error(e, console=self.console)\n print_error(\"CHECK YOUR INTERNET CONNECTION THEN TRY AGAIN\", console=self.console)\n TypeEnseignement_Select.select_by_value(TypeEnseignement_option.get_attribute(\"value\"))\n\n Cycle = self.driver.find_element(By.ID, \"Cycle\")\n Cycle_all_options = Cycle.find_elements(By.TAG_NAME, \"option\")\n\n Cycle_Select = Select(Cycle)\n\n for Cycle_option in Cycle_all_options:\n if Cycle_option.text != \"\":\n Cycle_Select.select_by_value(Cycle_option.get_attribute(\"value\"))\n Niveau = self.driver.find_element(By.ID, \"Niveau\")\n Niveau_all_options = Niveau.find_elements(By.TAG_NAME, \"option\")\n Niveau_Select = Select(Niveau)\n\n for Niveau_option in Niveau_all_options:\n if Niveau_option.text != \"\":\n Niveau_Select.select_by_value(Niveau_option.get_attribute(\"value\"))\n\n Classe = self.driver.find_element(By.ID, \"Classe\")\n Classe_all_options = Classe.find_elements(By.TAG_NAME, \"option\")\n Classe_Select = Select(Classe)\n\n for Classe_option in Classe_all_options:\n\n if Classe_option.text != \"\":\n classe_absence = Scan_Absences(classe=Classe_option.text)\n classe_list_absence, start_date, end_date = classe_absence.get_absence_day_per_student2()\n\n if classe_list_absence == False:\n print_info(f\"THE CLASS {Classe_option.text} NOT IN THE EXCEL FILE\", console=self.console)\n continue\n self.dates = get_date_list(start_date_str=start_date, end_date_str=end_date)\n Classe_Select.select_by_value(Classe_option.get_attribute(\"value\"))\n for l in range(len(self.dates)):\n print_success(f\"WORKING ON CLASS {Classe_option.text}, DATE {self.dates[l]}...\", console=self.console)\n date = self.driver.find_element(By.ID, \"Jour\")\n date.send_keys(Keys.CONTROL + \"a\")\n date.send_keys(Keys.DELETE)\n date.send_keys(self.dates[l])\n try:\n WebDriverWait(self.driver, 15).until(\n EC.element_to_be_clickable((By.CSS_SELECTOR, '#search > div > div > div > div.box-body > div.blocBtn > button'))\n )\n except Exception as e:\n print_error(e, console=self.console)\n pass\n else:\n self.searchBtn = self.driver.find_element(By.CSS_SELECTOR, '#search > div > div > div > div.box-body > div.blocBtn > button')\n self.searchBtn.click()\n try:\n WebDriverWait(self.driver, 3).until(\n EC.invisibility_of_element_located(\n (\n By.ID, \"loadingDiv\",\n )\n )\n )\n except Exception as e:\n print_error(e, console=self.console)\n continue\n else:\n print_info(\"FILLING THE ABSENCE...\", console=self.console)\n self.fill_absence(classe_list_absence=classe_list_absence,class_name=Classe_option.text, day_index = l)\n try:\n WebDriverWait(self.driver, 30).until(\n EC.presence_of_element_located((By.CSS_SELECTOR,\"#gridFrom > button\"))\n )\n except Exception as e:\n print_error(e, console=self.console)\n print_error('WE COULD NOT FIND THE SAVE BUTTON ', console=self.console)\n self.driver.quit()\n # sys.exit()\n else:\n try:\n 
WebDriverWait(self.driver, 15).until(EC.element_to_be_clickable((By.CSS_SELECTOR, \"#gridFrom > button\")))\n except Exception as e:\n print_error(e, console=self.console)\n print_error('WE COULD NOT FIND THE SAVE BUTTON', console=self.console)\n else:\n saveBtn = self.driver.find_element(By.CSS_SELECTOR, \"#gridFrom > button\")\n # saveBtn.click()\n self.driver.execute_script(\"arguments[0].click();\", saveBtn)\n\n print_info('SAVE BUTTON IS CLICKED', console=self.console)\n try:\n WebDriverWait(self.driver, 3).until(\n EC.invisibility_of_element_located(\n (\n By.ID, \"loadingDiv\",\n )\n )\n )\n except Exception as e:\n print_error(e, console=self.console)\n pass\n try:\n WebDriverWait(self.driver, 10).until(\n EC.presence_of_element_located(\n (\n By.ID, \"Model_msg_Btn\",\n )\n )\n )\n except Exception as e:\n print_error(e, console=self.console)\n print_error('WE COULD NOT FIND THE CLOSE BUTTON', console=self.console)\n else:\n print_info('CLOSE BUTTON IS CLOSED', console=self.console)\n close_btn = self.driver.find_element(By.ID, \"Model_msg_Btn\")\n close_btn.click()\n try:\n WebDriverWait(self.driver, 3).until(\n EC.invisibility_of_element_located(\n (\n By.ID, \"loadingDiv\",\n )\n )\n )\n except Exception as e:\n print_error(e, console=self.console)\n pass\n\n print_success(f\"CLASS {Classe_option.text} PASSED, DATE {self.dates[l]}\", console=self.console)\n\n return\n\n def fill_absence(self, classe_list_absence, class_name, day_index):\n mytable = self.driver.find_element(By.XPATH, self.data_table_reduced_Xpath)\n i = 0\n for row in mytable.find_elements(By.CSS_SELECTOR, 'tr'):\n i += 1\n cne = self.driver.find_element(By.XPATH, str(self.row_Xpath) + str(i) + str(self.CNE_Xpath))\n name = self.driver.find_element(By.XPATH, str(self.row_Xpath) + str(i) + str(self.nome_Xpath))\n\n try:\n week_absence_student = classe_list_absence[cne.text]\n week_days_per_student = self.list_week_to_days(week_absence_student)\n except KeyError as e:\n print_error(e, self.console)\n print_error(f'THIS CNE {cne.text} DOES NOT EXIST, THE NAME IS: {name.text}, CLASS: {class_name}', console=self.console)\n else:\n self.fill_absence_per_day(i,week_days_per_student[day_index])\n\n # if classe_name == \"1APIC-1\":\n # time.sleep(400)\n return\n\n def fill_absence_per_day(self,row_i, day):\n j = 0\n if str(day[0]) == \"0\":\n select_cause = Select(self.driver.find_element(By.XPATH, str(self.row_Xpath) + str(row_i) + str(self.select_Xpath)))\n select_cause.select_by_value(\"2\")\n checkbox = self.driver.find_element(By.XPATH, str(self.row_Xpath) + str(row_i) + str(self.h_Xpath) + str(5) + \"]/input[1]\")\n checkbox.click()\n return\n elif \"x\" in day:\n try:\n WebDriverWait(self.driver, 10).until(\n EC.presence_of_element_located(\n (\n By.XPATH, str(self.row_Xpath) + str(row_i) + str(self.select_Xpath)\n )\n )\n )\n except Exception as e:\n print_error(e, self.console)\n print_error(\"AN ERROR IN HTML SELECTION PLEASE TRY AGAIN.\", console=self.console)\n self.exit_program()\n select_cause = Select(self.driver.find_element(By.XPATH, str(self.row_Xpath) + str(row_i) + str(self.select_Xpath)))\n select_cause.select_by_value(\"2\")\n for i in range(len(day)):\n if day[i] == None:\n continue\n if str(day[i]) == \"x\":\n # print(day[i])\n if i < 4:\n checkbox = self.driver.find_element(By.XPATH, str(self.row_Xpath) + str(row_i) + str(self.h_Xpath) + str(6 + i) + \"]/input[1]\")\n else:\n checkbox = self.driver.find_element(By.XPATH, str(self.row_Xpath) + str(row_i) + str(\n self.h_Xpath) + str(8 + i) + 
\"]/input[1]\")\n checkbox.click()\n else:\n print_error('WE CANNOT REGONIZE THE FILL OF THE CELL', console=self.console)\n\n # j += 1\n # date = self.driver.find_element(By.ID, \"Jour\")\n # date.send_keys(Keys.CONTROL + \"a\")\n # date.send_keys(Keys.DELETE)\n # date.send_keys(self.dates[j])\n # self.searchBtn.click()\n\n\n def list_week_to_days(self, list_week):\n index = 0\n week = []\n day = []\n for i in range(2,len(list_week)):\n if index == 8:\n week.append(day)\n day = []\n index = 0\n day.append(list_week[i])\n index += 1\n week.append(day)\n return week\n\n\n def main_list_reader(self):\n self.get_list_page()\n self.list_of_each_class()\n return" } ]
import tkinter as tk import customtkinter import time import os import threading import logging import sys from tkinter import filedialog from PIL import Image from validate_email import validate_email from utilities import C_File, C_Dossier from dotenv import set_key, load_dotenv from absence_app import Read_Db from absence_app import Absence from Interaction_browser import Massar_Direction_Sagou
11,129
self.sideBar_logo = customtkinter.CTkLabel(self.sidebar_frame, text="", image=self.main_logo_image) self.sideBar_logo.grid(row=5, column=0, padx=20, pady=20) self.entry_default_bordercolor = customtkinter.CTkEntry(self).cget("border_color") # self.logo_label = customtkinter.CTkLabel(self.sidebar_frame, text="SagouBot", font=customtkinter.CTkFont(size=40, weight="bold")) # self.logo_label.grid(row=1, column=0, padx=20, pady=(20, 10)) self.generate_list_menu_button_event() # Console (Text area) self.console_text = customtkinter.CTkTextbox(self, height=200, width=400, fg_color="gray1") self.console_text.insert("0.0", "CONSOLE") self.console_text.insert(F"{len('CONSOLE')}.0", "--------" * 28) self.console_text.configure(state="disabled") self.console_text.grid(row=1, column=1, padx=(20, 20), pady=(5, 15), sticky="nsew") self.console_text.tag_config("error", foreground="red") self.console_text.tag_config("note", foreground="orange") self.console_text.tag_config("successes", foreground="blue") # self.generate_progress_bar() # Progress Bar # progress_bar = customtkinter.CTkProgressBar(self, mode='determinate') # progress_bar.grid(row=1, column=1, padx=(20, 20), pady=(5, 0), sticky="nsew") # # Button to trigger updates # update_button = customtkinter.CTkButton(self, text="Start Processing", command=()) # update_button.grid(row=1, column=1, padx=(20, 20), pady=(5, 0), sticky="nsew") def high_school_switch(self): state = self.high_school_options.get() options = [self.TCS, self.TCSF, self.TCLSH, self.BACSC, self.BACSH, self.BACSE, self.BACSVT, self.BACSH2] if state: for option in options: option.configure(state="normal") else: for option in options: option.configure(state="disabled") return def college_switch(self): state = self.college_options.get() if state: self.college_generale.configure(state="normal") self.college_aspeb.configure(state="normal") self.college_inter.configure(state="normal") else: self.college_generale.configure(state="disabled") self.college_aspeb.configure(state="disabled") self.college_inter.configure(state="disabled") def college_label_error(self): current_text = self.label_college.cget("text") self.label_college.configure(text=current_text.replace("*", "") + "*", text_color="red") return def high_school_label_eroor(self): current_text = self.label_high_school.cget("text") self.label_high_school.configure(text=current_text.replace("*", "") + "*", text_color="red") return def reset_label_high_college(self): current_text1 = self.label_college.cget("text") current_text = self.label_high_school.cget("text") self.label_high_school.configure(text=current_text.replace("*", ""), text_color="gray90") self.label_college.configure(text=current_text1.replace("*", ""), text_color="gray90") def label_data_file_error(self): current_text = self.label_data_file.cget("text") self.label_data_file.configure(text=current_text.replace("*", "") + "*", text_color="red") return def label_template_file_error(self): current_text = self.label_template_entry.cget("text") self.label_template_entry.configure(text=current_text.replace("*", "") + "*", text_color="red") return def reset_error1(self): current_text = self.label_data_file.cget("text") self.label_data_file.configure(text=current_text.replace("*", ""), text_color="gray90") return def reset_error2(self): current_text = self.label_template_entry.cget("text") self.label_template_entry.configure(text=current_text.replace("*", ""), text_color="gray90") return def directory_error(self): current_text = self.label_output_folder.cget("text") 
self.label_output_folder.configure(text=current_text + "*", text_color="red") return def reset_error3(self): current_text = self.label_output_folder.cget("text") self.label_output_folder.configure(text=current_text.replace("*", ""), text_color="gray90") return def go_to_review2(self): if self.email_entry.get() == "" or self.password_entry.get() == "" or not self.validate_path(self.entry_path_absence) or not self.check_terms_and_condition.get(): if self.email_entry.get() == "": self.error_label(self.label_email_entry) self.entry_error(self.email_entry) if len(self.password_entry.get()) < 8: self.error_label(self.label_password_entry) self.entry_error(self.password_entry) if not self.validate_path(self.entry_path_absence): self.error_label(self.label_absence_data_file) self.entry_error(self.entry_path_absence) if not self.check_terms_and_condition.get(): self.check_terms_and_condition.configure(border_color="red", text_color="red") self.error_label(self.label_terms) else:
# https://stackoverflow.com/questions/31836104/pyinstaller-and-onefile-how-to-include-an-image-in-the-exe-file def resource_path(relative_path): """ Get absolute path to resource, works for dev and for PyInstaller """ try: # PyInstaller creates a temp folder and stores path in _MEIPASS base_path = sys._MEIPASS2 except Exception: base_path = os.path.abspath(".") return os.path.join(base_path, relative_path) logging.basicConfig(filename='app.log', level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s') customtkinter.set_appearance_mode("Dark") # Modes: "System" (standard), "Dark", "Light" customtkinter.set_default_color_theme("dark-blue") # Themes: "blue" (standard), "green", "dark-blue" dirPath = os.path.dirname(os.path.realpath(__file__)) class App(customtkinter.CTk): def __init__(self): super().__init__() self.tabview_generate_lists = None self.tabview_fill_bot= None self.generate_list_menu = None self.about_us_text = None self.fill_absence_menu = None self.try_again_generate = False self.try_again_fill = False self.progressbar_1 = None image_path = resource_path("images") self.main_logo_image = customtkinter.CTkImage( light_image=Image.open(os.path.join(image_path, "logo_black.png")), dark_image=Image.open(os.path.join(image_path, "logo_white.png")), size=(200,200)) self.about_us_image = customtkinter.CTkImage( light_image=Image.open(os.path.join(image_path, "logo_black.png")), dark_image=Image.open(os.path.join(image_path, "logo_white.png")), size=(150, 150)) # self.main_logo_photo = ImageTk.PhotoImage(self.main_logo_image) # configure window self.title("SagouBot Massar Direction") self.iconbitmap(resource_path("icon.ico")) self.geometry(f"{1100}x{580}") # configure grid layout (4x4) self.grid_columnconfigure(1, weight=1) self.grid_columnconfigure((2, 3), weight=0) self.grid_rowconfigure((0, 1, 2), weight=1) # create sidebar frame with widgets self.sidebar_frame = customtkinter.CTkFrame(self, width=200, corner_radius=0) self.sidebar_frame.grid(row=0, column=0, rowspan=4, sticky="nsew") self.sidebar_frame.grid_rowconfigure(5, weight=1) self.sidebar_frame.grid(row=0, column=0) self.sideBar_logo = customtkinter.CTkLabel(self.sidebar_frame, text="", image=self.main_logo_image) self.sideBar_logo.grid(row=5, column=0, padx=20, pady=20) self.entry_default_bordercolor = customtkinter.CTkEntry(self).cget("border_color") # self.logo_label = customtkinter.CTkLabel(self.sidebar_frame, text="SagouBot", font=customtkinter.CTkFont(size=40, weight="bold")) # self.logo_label.grid(row=1, column=0, padx=20, pady=(20, 10)) self.generate_list_menu_button_event() # Console (Text area) self.console_text = customtkinter.CTkTextbox(self, height=200, width=400, fg_color="gray1") self.console_text.insert("0.0", "CONSOLE") self.console_text.insert(F"{len('CONSOLE')}.0", "--------" * 28) self.console_text.configure(state="disabled") self.console_text.grid(row=1, column=1, padx=(20, 20), pady=(5, 15), sticky="nsew") self.console_text.tag_config("error", foreground="red") self.console_text.tag_config("note", foreground="orange") self.console_text.tag_config("successes", foreground="blue") # self.generate_progress_bar() # Progress Bar # progress_bar = customtkinter.CTkProgressBar(self, mode='determinate') # progress_bar.grid(row=1, column=1, padx=(20, 20), pady=(5, 0), sticky="nsew") # # Button to trigger updates # update_button = customtkinter.CTkButton(self, text="Start Processing", command=()) # update_button.grid(row=1, column=1, padx=(20, 20), pady=(5, 0), sticky="nsew") def 
high_school_switch(self): state = self.high_school_options.get() options = [self.TCS, self.TCSF, self.TCLSH, self.BACSC, self.BACSH, self.BACSE, self.BACSVT, self.BACSH2] if state: for option in options: option.configure(state="normal") else: for option in options: option.configure(state="disabled") return def college_switch(self): state = self.college_options.get() if state: self.college_generale.configure(state="normal") self.college_aspeb.configure(state="normal") self.college_inter.configure(state="normal") else: self.college_generale.configure(state="disabled") self.college_aspeb.configure(state="disabled") self.college_inter.configure(state="disabled") def college_label_error(self): current_text = self.label_college.cget("text") self.label_college.configure(text=current_text.replace("*", "") + "*", text_color="red") return def high_school_label_eroor(self): current_text = self.label_high_school.cget("text") self.label_high_school.configure(text=current_text.replace("*", "") + "*", text_color="red") return def reset_label_high_college(self): current_text1 = self.label_college.cget("text") current_text = self.label_high_school.cget("text") self.label_high_school.configure(text=current_text.replace("*", ""), text_color="gray90") self.label_college.configure(text=current_text1.replace("*", ""), text_color="gray90") def label_data_file_error(self): current_text = self.label_data_file.cget("text") self.label_data_file.configure(text=current_text.replace("*", "") + "*", text_color="red") return def label_template_file_error(self): current_text = self.label_template_entry.cget("text") self.label_template_entry.configure(text=current_text.replace("*", "") + "*", text_color="red") return def reset_error1(self): current_text = self.label_data_file.cget("text") self.label_data_file.configure(text=current_text.replace("*", ""), text_color="gray90") return def reset_error2(self): current_text = self.label_template_entry.cget("text") self.label_template_entry.configure(text=current_text.replace("*", ""), text_color="gray90") return def directory_error(self): current_text = self.label_output_folder.cget("text") self.label_output_folder.configure(text=current_text + "*", text_color="red") return def reset_error3(self): current_text = self.label_output_folder.cget("text") self.label_output_folder.configure(text=current_text.replace("*", ""), text_color="gray90") return def go_to_review2(self): if self.email_entry.get() == "" or self.password_entry.get() == "" or not self.validate_path(self.entry_path_absence) or not self.check_terms_and_condition.get(): if self.email_entry.get() == "": self.error_label(self.label_email_entry) self.entry_error(self.email_entry) if len(self.password_entry.get()) < 8: self.error_label(self.label_password_entry) self.entry_error(self.password_entry) if not self.validate_path(self.entry_path_absence): self.error_label(self.label_absence_data_file) self.entry_error(self.entry_path_absence) if not self.check_terms_and_condition.get(): self.check_terms_and_condition.configure(border_color="red", text_color="red") self.error_label(self.label_terms) else:
paths = C_File(resource_path("db/paths.txt"))
0
2023-10-29 18:10:27+00:00
16k
hsma-programme/Teaching_DES_Concepts_Streamlit
pages/2_🛏️_Using_A_Simple_Resource.py
[ { "identifier": "add_logo", "path": "helper_functions.py", "snippet": "def add_logo():\n '''\n Add a logo at the top of the page navigation sidebar\n\n Approach written by blackary on\n https://discuss.streamlit.io/t/put-logo-and-title-above-on-top-of-page-navigation-in-sidebar-of-multipage-app/28213/5\n \n '''\n st.markdown(\n \"\"\"\n <style>\n [data-testid=\"stSidebarNav\"] {\n background-image: url(https://raw.githubusercontent.com/hsma-programme/Teaching_DES_Concepts_Streamlit/main/resources/hsma_logo_transparent_background_small.png);\n background-repeat: no-repeat;\n padding-top: 175px;\n background-position: 40px 30px;\n }\n [data-testid=\"stSidebarNav\"]::before {\n content: \"The DES Playground\";\n padding-left: 20px;\n margin-top: 50px;\n font-size: 30px;\n position: relative;\n top: 100px;\n }\n\n </style>\n \"\"\",\n unsafe_allow_html=True,\n )" }, { "identifier": "mermaid", "path": "helper_functions.py", "snippet": "def mermaid(code: str, height=600) -> None:\n components.html(\n f\"\"\"\n <link href='http://fonts.googleapis.com/css?family=Lexend' rel='stylesheet' type='text/css'>\n\n <pre class=\"mermaid\">\n {code}\n </pre>\n\n <script type=\"module\">\n import mermaid from 'https://cdn.jsdelivr.net/npm/mermaid@10/dist/mermaid.esm.min.mjs';\n mermaid.initialize({{ startOnLoad: true }});\n </script>\n \"\"\",\n height=height\n )" }, { "identifier": "center_running", "path": "helper_functions.py", "snippet": "def center_running():\n \"\"\"\n Have the \"running man\" animation in the center of the screen instead of the top right corner.\n \"\"\"\n st.markdown(\"\"\"\n<style>\n\ndiv[class*=\"StatusWidget\"]{\n\n position: fixed;\n margin: auto;\n top: 50%;\n left: 50%;\n marginRight: \"0px\"\n width: 50%;\n scale: 2.75;\n opacity: 1\n}\n\n</style>\n\"\"\", \n unsafe_allow_html=True)" }, { "identifier": "Scenario", "path": "model_classes.py", "snippet": "class Scenario:\n '''\n Container class for scenario parameters/arguments\n\n Passed to a model and its process classes\n '''\n\n def __init__(self,\n random_number_set=1,\n n_triage=DEFAULT_N_TRIAGE,\n n_reg=DEFAULT_N_REG,\n n_exam=DEFAULT_N_EXAM,\n n_trauma=DEFAULT_N_TRAUMA,\n n_cubicles_1=DEFAULT_N_CUBICLES_1,\n n_cubicles_2=DEFAULT_N_CUBICLES_2,\n triage_mean=DEFAULT_TRIAGE_MEAN,\n reg_mean=DEFAULT_REG_MEAN,\n reg_var=DEFAULT_REG_VAR,\n exam_mean=DEFAULT_EXAM_MEAN,\n exam_var=DEFAULT_EXAM_VAR,\n trauma_mean=DEFAULT_TRAUMA_MEAN,\n trauma_treat_mean=DEFAULT_TRAUMA_TREAT_MEAN,\n trauma_treat_var=DEFAULT_TRAUMA_TREAT_VAR,\n non_trauma_treat_mean=DEFAULT_NON_TRAUMA_TREAT_MEAN,\n non_trauma_treat_var=DEFAULT_NON_TRAUMA_TREAT_VAR,\n non_trauma_treat_p=DEFAULT_NON_TRAUMA_TREAT_P,\n prob_trauma=DEFAULT_PROB_TRAUMA,\n arrival_df=NSPP_PATH,\n override_arrival_rate=OVERRIDE_ARRIVAL_RATE,\n manual_arrival_rate=MANUAL_ARRIVAL_RATE_VALUE,\n model=\"full\"\n ):\n '''\n Create a scenario to parameterise the simulation model\n\n Parameters:\n -----------\n random_number_set: int, optional (default=DEFAULT_RNG_SET)\n Set to control the initial seeds of each stream of pseudo\n random numbers used in the model.\n\n n_triage: int\n The number of triage cubicles\n\n n_reg: int\n The number of registration clerks\n\n n_exam: int\n The number of examination rooms\n\n n_trauma: int\n The number of trauma bays for stablisation\n\n n_cubicles_1: int\n The number of non-trauma treatment cubicles\n\n n_cubicles_2: int\n The number of trauma treatment cubicles\n\n triage_mean: float\n Mean duration of the triage distribution (Exponential)\n\n reg_mean: 
float\n Mean duration of the registration distribution (Lognormal)\n\n reg_var: float\n Variance of the registration distribution (Lognormal)\n\n exam_mean: float\n Mean of the examination distribution (Normal)\n\n exam_var: float\n Variance of the examination distribution (Normal)\n\n trauma_mean: float\n Mean of the trauma stabilisation distribution (Exponential)\n\n trauma_treat_mean: float\n Mean of the trauma cubicle treatment distribution (Lognormal)\n\n trauma_treat_var: float\n Variance of the trauma cubicle treatment distribution (Lognormal)\n\n non_trauma_treat_mean: float\n Mean of the non trauma treatment distribution\n\n non_trauma_treat_var: float\n Variance of the non trauma treatment distribution\n\n non_trauma_treat_p: float\n Probability non trauma patient requires treatment\n\n prob_trauma: float\n probability that a new arrival is a trauma patient.\n\n model: string\n What model to run. Default is full. \n Options are \"full\", \"simplest\", \"simple_with_branch\"\n '''\n # sampling\n self.random_number_set = random_number_set\n\n # store parameters for sampling\n self.triage_mean = triage_mean\n self.reg_mean = reg_mean\n self.reg_var = reg_var\n self.exam_mean = exam_mean\n self.exam_var = exam_var\n self.trauma_mean = trauma_mean\n self.trauma_treat_mean = trauma_treat_mean\n self.trauma_treat_var = trauma_treat_var\n self.non_trauma_treat_mean = non_trauma_treat_mean\n self.non_trauma_treat_var = non_trauma_treat_var\n self.non_trauma_treat_p = non_trauma_treat_p\n self.prob_trauma = prob_trauma\n self.manual_arrival_rate = manual_arrival_rate\n self.arrival_df = arrival_df\n self.override_arrival_rate = override_arrival_rate\n self.model = model\n\n self.init_sampling()\n\n # count of each type of resource\n self.init_resource_counts(n_triage, n_reg, n_exam, n_trauma,\n n_cubicles_1, n_cubicles_2)\n\n def set_random_no_set(self, random_number_set):\n '''\n Controls the random sampling \n Parameters:\n ----------\n random_number_set: int\n Used to control the set of psuedo random numbers\n used by the distributions in the simulation.\n '''\n self.random_number_set = random_number_set\n self.init_sampling()\n\n def init_resource_counts(self, n_triage, n_reg, n_exam, n_trauma,\n n_cubicles_1, n_cubicles_2):\n '''\n Init the counts of resources to default values...\n '''\n self.n_triage = n_triage\n self.n_reg = n_reg\n self.n_exam = n_exam\n self.n_trauma = n_trauma\n\n # non-trauma (1), trauma (2) treatment cubicles\n self.n_cubicles_1 = n_cubicles_1\n self.n_cubicles_2 = n_cubicles_2\n\n def init_sampling(self):\n '''\n Create the distributions used by the model and initialise \n the random seeds of each.\n '''\n # create random number streams\n rng_streams = np.random.default_rng(self.random_number_set)\n self.seeds = rng_streams.integers(0, 999999999, size=N_STREAMS)\n\n # create distributions\n\n # Triage duration\n self.triage_dist = Exponential(self.triage_mean,\n random_seed=self.seeds[0])\n\n # Registration duration (non-trauma only)\n self.reg_dist = Lognormal(self.reg_mean,\n np.sqrt(self.reg_var),\n random_seed=self.seeds[1])\n\n # Evaluation (non-trauma only)\n self.exam_dist = Normal(self.exam_mean,\n np.sqrt(self.exam_var),\n random_seed=self.seeds[2])\n\n # Trauma/stablisation duration (trauma only)\n self.trauma_dist = Exponential(self.trauma_mean,\n random_seed=self.seeds[3])\n\n # Non-trauma treatment\n self.nt_treat_dist = Lognormal(self.non_trauma_treat_mean,\n np.sqrt(self.non_trauma_treat_var),\n random_seed=self.seeds[4])\n\n # treatment of 
trauma patients\n self.treat_dist = Lognormal(self.trauma_treat_mean,\n np.sqrt(self.non_trauma_treat_var),\n random_seed=self.seeds[5])\n\n # probability of non-trauma patient requiring treatment\n self.nt_p_treat_dist = Bernoulli(self.non_trauma_treat_p,\n random_seed=self.seeds[6])\n\n # probability of non-trauma versus trauma patient\n self.p_trauma_dist = Bernoulli(self.prob_trauma,\n random_seed=self.seeds[7])\n\n # init sampling for non-stationary poisson process\n self.init_nspp()\n\n def init_nspp(self):\n\n # read arrival profile\n self.arrivals = pd.read_csv(NSPP_PATH) # pylint: disable=attribute-defined-outside-init\n self.arrivals['mean_iat'] = 60 / self.arrivals['arrival_rate']\n\n # maximum arrival rate (smallest time between arrivals)\n self.lambda_max = self.arrivals['arrival_rate'].max() # pylint: disable=attribute-defined-outside-init\n\n # thinning exponential\n if self.override_arrival_rate is True:\n\n self.arrival_dist = Exponential(self.manual_arrival_rate, # pylint: disable=attribute-defined-outside-init\n random_seed=self.seeds[8])\n else:\n self.arrival_dist = Exponential(60.0 / self.lambda_max, # pylint: disable=attribute-defined-outside-init\n random_seed=self.seeds[8])\n\n # thinning uniform rng\n self.thinning_rng = Uniform(low=0.0, high=1.0, # pylint: disable=attribute-defined-outside-init\n random_seed=self.seeds[9])" }, { "identifier": "multiple_replications", "path": "model_classes.py", "snippet": "def multiple_replications(scenario,\n rc_period=DEFAULT_RESULTS_COLLECTION_PERIOD,\n n_reps=5,\n return_detailed_logs=False):\n '''\n Perform multiple replications of the model.\n\n Params:\n ------\n scenario: Scenario\n Parameters/arguments to configurethe model\n\n rc_period: float, optional (default=DEFAULT_RESULTS_COLLECTION_PERIOD)\n results collection period. 
\n the number of minutes to run the model to collect results\n\n n_reps: int, optional (default=DEFAULT_N_REPS)\n Number of independent replications to run.\n\n Returns:\n --------\n pandas.DataFrame\n '''\n\n # if return_full_log:\n # results = [single_run(scenario,\n # rc_period,\n # random_no_set=(scenario.random_number_set)+rep,\n # return_full_log=True,\n # return_event_log=False)\n # for rep in range(n_reps)]\n\n # format and return results in a dataframe\n # df_results = pd.concat(reesults)\n # df_results.index = np.arange(1, len(df_results)+1)\n # df_results.index.name = 'rep'\n # return df_results\n # return results\n\n if return_detailed_logs:\n results = [{'rep': rep+1,\n 'results': single_run(scenario,\n rc_period,\n random_no_set=(scenario.random_number_set)+rep,\n return_detailed_logs=True)}\n # .assign(Rep=rep+1)\n for rep in range(n_reps)]\n\n # format and return results in a dataframe\n\n return results\n # {\n # {df_results = [pd.concat(result) for result in results] }\n # }\n # return results\n\n results = [single_run(scenario,\n rc_period,\n random_no_set=(scenario.random_number_set)+rep)\n for rep in range(n_reps)]\n\n # format and return results in a dataframe\n df_results = pd.concat(results)\n df_results.index = np.arange(1, len(df_results)+1)\n df_results.index.name = 'rep'\n return df_results" }, { "identifier": "Normal", "path": "distribution_classes.py", "snippet": "class Normal:\n '''\n Convenience class for the normal distribution.\n packages up distribution parameters, seed and random generator.\n '''\n def __init__(self, mean, sigma, random_seed=None):\n '''\n Constructor\n \n Params:\n ------\n mean: float\n The mean of the normal distribution\n \n sigma: float\n The stdev of the normal distribution\n \n random_seed: int, optional (default=None)\n A random seed to reproduce samples. If set to none then a unique\n sample is created.\n '''\n self.rng = np.random.default_rng(seed=random_seed)\n self.mean = mean\n self.sigma = sigma\n \n def sample(self, size=None):\n '''\n Generate a sample from the normal distribution\n \n Params:\n -------\n size: int, optional (default=None)\n the number of samples to return. 
If size=None then a single\n sample is returned.\n '''\n return self.rng.normal(self.mean, self.sigma, size=size)" }, { "identifier": "reshape_for_animations", "path": "output_animation_functions.py", "snippet": "def reshape_for_animations(full_event_log, every_x_minutes=10):\n minute_dfs = list()\n patient_dfs = list()\n\n for rep in range(1, max(full_event_log['rep'])+1):\n # print(\"Rep {}\".format(rep))\n # Start by getting data for a single rep\n filtered_log_rep = full_event_log[full_event_log['rep'] == rep].drop('rep', axis=1)\n pivoted_log = filtered_log_rep.pivot_table(values=\"time\", \n index=[\"patient\",\"event_type\",\"pathway\"], \n columns=\"event\").reset_index()\n\n for minute in range(10*60*24):\n # print(minute)\n # Get patients who arrived before the current minute and who left the system after the current minute\n # (or arrived but didn't reach the point of being seen before the model run ended)\n # When turning this into a function, think we will want user to pass\n # 'first step' and 'last step' or something similar\n # and will want to reshape the event log for this so that it has a clear start/end regardless\n # of pathway (move all the pathway stuff into a separate column?)\n\n # Think we maybe need a pathway order and pathway precedence column\n # But what about shared elements of each pathway?\n if minute % every_x_minutes == 0:\n\n try:\n current_patients_in_moment = pivoted_log[(pivoted_log['arrival'] <= minute) & \n (\n (pivoted_log['depart'] >= minute) |\n (pivoted_log['depart'].isnull() )\n )]['patient'].values\n except KeyError:\n current_patients_in_moment = None\n \n if current_patients_in_moment is not None:\n patient_minute_df = filtered_log_rep[filtered_log_rep['patient'].isin(current_patients_in_moment)]\n # print(len(patient_minute_df))\n # Grab just those clients from the filtered log (the unpivoted version)\n # Each person can only be in a single place at once, so filter out any events\n # that have taken place after the minute\n # then just take the latest event that has taken place for each client\n # most_recent_events_minute = patient_minute_df[patient_minute_df['time'] <= minute] \\\n # .sort_values('time', ascending=True) \\\n # .groupby(['patient',\"event_type\",\"pathway\"]) \\\n # .tail(1) \n\n most_recent_events_minute_ungrouped = patient_minute_df[patient_minute_df['time'] <= minute].reset_index() \\\n .sort_values(['time', 'index'], ascending=True) \\\n .groupby(['patient']) \\\n .tail(1) \n\n patient_dfs.append(most_recent_events_minute_ungrouped.assign(minute=minute, rep=rep))\n\n # Now count how many people are in each state\n # CHECK - I THINK THIS IS PROBABLY DOUBLE COUNTING PEOPLE BECAUSE OF THE PATHWAY AND EVENT TYPE. 
JUST JOIN PATHWAY/EVENT TYPE BACK IN INSTEAD?\n state_counts_minute = most_recent_events_minute_ungrouped[['event']].value_counts().rename(\"count\").reset_index().assign(minute=minute, rep=rep)\n \n minute_dfs.append(state_counts_minute)\n\n\n minute_counts_df = pd.concat(minute_dfs).merge(filtered_log_rep[['event','event_type', 'pathway']].drop_duplicates().reset_index(drop=True), on=\"event\")\n full_patient_df = pd.concat(patient_dfs).sort_values([\"rep\", \"minute\", \"event\"])\n\n # Add a final exit step for each client\n final_step = full_patient_df.sort_values([\"rep\", \"patient\", \"minute\"], ascending=True).groupby([\"rep\", \"patient\"]).tail(1)\n final_step['minute'] = final_step['minute'] + every_x_minutes\n final_step['event'] = \"exit\"\n # final_step['event_type'] = \"arrival_departure\"\n\n full_patient_df = full_patient_df.append(final_step)\n\n minute_counts_df_pivoted = minute_counts_df.pivot_table(values=\"count\", \n index=[\"minute\", \"rep\", \"event_type\", \"pathway\"], \n columns=\"event\").reset_index().fillna(0)\n\n minute_counts_df_complete = minute_counts_df_pivoted.melt(id_vars=[\"minute\", \"rep\",\"event_type\",\"pathway\"])\n\n return {\n \"minute_counts_df\": minute_counts_df,\n \"minute_counts_df_complete\": minute_counts_df_complete,\n \"full_patient_df\": full_patient_df.sort_values([\"rep\", \"minute\", \"event\"])\n \n }" }, { "identifier": "animate_activity_log", "path": "output_animation_functions.py", "snippet": "def animate_activity_log(\n full_patient_df,\n event_position_df,\n scenario,\n rep=1,\n plotly_height=900,\n plotly_width=None,\n wrap_queues_at=None,\n include_play_button=True,\n return_df_only=False,\n add_background_image=None,\n display_stage_labels=True,\n icon_and_text_size=24,\n override_x_max=None,\n override_y_max=None,\n time_display_units=None,\n setup_mode=False,\n frame_duration=400, #milliseconds\n frame_transition_duration=600 #milliseconds\n ):\n \"\"\"_summary_\n\n Args:\n full_patient_df (pd.Dataframe): \n \n event_position_dicts (pd.Dataframe): \n dataframe with three cols - event, x and y\n Can be more easily created by passing a list of dicts to pd.DataFrame\n list of dictionaries with one dicitionary per event type\n containing keys 'event', 'x' and 'y'\n This will determine the intial position of any entries in the animated log\n (think of it as the bottom right hand corner of any group of entities at each stage)\n\n scenario:\n Pass in an object that specifies the number of resources at different steps\n\n rep (int, optional): Defaults to 1.\n The replication of any model to include. Can only display one rep at a time, so will take\n the first rep if not otherwise specified. \n \n plotly_height (int, optional): Defaults to 900.\n\n Returns:\n Plotly fig object\n \"\"\" \n\n # Filter to only a single replication\n\n # TODO: Remove this from this function, and instead write a test\n # to ensure that no patient ID appears in multiple places at a single minute\n # and return an error if it does so\n # Move the step of ensuring there's only a single model run involved to outside\n # of this function as it's not really its job. 
\n\n full_patient_df = full_patient_df[full_patient_df['rep'] == rep].sort_values([\n 'event','minute','time'\n ])\n\n # full_patient_df['count'] = full_patient_df.groupby(['event','minute','rep'])['minute'] \\\n # .transform('count')\n \n # Order patients within event/minute/rep to determine their eventual position in the line\n full_patient_df['rank'] = full_patient_df.groupby(['event','minute','rep'])['minute'] \\\n .rank(method='first')\n\n full_patient_df_plus_pos = full_patient_df.merge(event_position_df, on=\"event\", how='left') \\\n .sort_values([\"rep\", \"event\", \"minute\", \"time\"])\n\n # Determine the position for any resource use steps\n resource_use = full_patient_df_plus_pos[full_patient_df_plus_pos['event_type'] == \"resource_use\"].copy()\n resource_use['y_final'] = resource_use['y']\n resource_use['x_final'] = resource_use['x'] - resource_use['resource_id']*10\n\n # Determine the position for any queuing steps\n queues = full_patient_df_plus_pos[full_patient_df_plus_pos['event_type']=='queue']\n queues['y_final'] = queues['y']\n queues['x_final'] = queues['x'] - queues['rank']*10\n\n # If we want people to wrap at a certain queue length, do this here\n # They'll wrap at the defined point and then the queue will start expanding upwards\n # from the starting row\n if wrap_queues_at is not None:\n queues['row'] = np.floor((queues['rank']) / (wrap_queues_at+1))\n queues['x_final'] = queues['x_final'] + (wrap_queues_at*queues['row']*10)\n queues['y_final'] = queues['y_final'] + (queues['row'] * 30)\n\n full_patient_df_plus_pos = pd.concat([queues, resource_use])\n\n # full_patient_df_plus_pos['icon'] = '🙍'\n\n individual_patients = full_patient_df['patient'].drop_duplicates().sort_values()\n \n # Recommend https://emojipedia.org/ for finding emojis to add to list\n # note that best compatibility across systems can be achieved by using \n # emojis from v12.0 and below - Windows 10 got no more updates after that point\n icon_list = [\n '🧔🏼', '👨🏿‍🦯', '👨🏻‍🦰', '🧑🏻', '👩🏿‍🦱', \n '🤰', '👳🏽', '👩🏼‍🦳', '👨🏿‍🦳', '👩🏼‍🦱', \n '🧍🏽‍♀️', '👨🏼‍🔬', '👩🏻‍🦰', '🧕🏿', '👨🏼‍🦽', \n '👴🏾', '👨🏼‍🦱', '👷🏾', '👧🏿', '🙎🏼‍♂️',\n '👩🏻‍🦲', '🧔🏾', '🧕🏻', '👨🏾‍🎓', '👨🏾‍🦲',\n '👨🏿‍🦰', '🙍🏼‍♂️', '🙋🏾‍♀️', '👩🏻‍🔧', '👨🏿‍🦽', \n '👩🏼‍🦳', '👩🏼‍🦼', '🙋🏽‍♂️', '👩🏿‍🎓', '👴🏻', \n '🤷🏻‍♀️', '👶🏾', '👨🏻‍✈️', '🙎🏿‍♀️', '👶🏻', \n '👴🏿', '👨🏻‍🦳', '👩🏽', '👩🏽‍🦳', '🧍🏼‍♂️', \n '👩🏽‍🎓', '👱🏻‍♀️', '👲🏼', '🧕🏾', '👨🏻‍🦯', \n '🧔🏿', '👳🏿', '🤦🏻‍♂️', '👩🏽‍🦰', '👨🏼‍✈️', \n '👨🏾‍🦲', '🧍🏾‍♂️', '👧🏼', '🤷🏿‍♂️', '👨🏿‍🔧', \n '👱🏾‍♂️', '👨🏼‍🎓', '👵🏼', '🤵🏿', '🤦🏾‍♀️',\n '👳🏻', '🙋🏼‍♂️', '👩🏻‍🎓', '👩🏼‍🌾', '👩🏾‍🔬',\n '👩🏿‍✈️', '🎅🏼', '👵🏿', '🤵🏻', '🤰'\n ]\n\n full_icon_list = icon_list * int(np.ceil(len(individual_patients)/len(icon_list)))\n\n full_icon_list = full_icon_list[0:len(individual_patients)]\n\n full_patient_df_plus_pos = full_patient_df_plus_pos.merge(\n pd.DataFrame({'patient':list(individual_patients),\n 'icon':full_icon_list}),\n on=\"patient\")\n\n if return_df_only:\n return full_patient_df_plus_pos\n\n if override_x_max is not None:\n x_max = override_x_max\n else:\n x_max = event_position_df['x'].max()*1.25\n\n if override_y_max is not None:\n y_max = override_x_max\n else:\n y_max = event_position_df['y'].max()*1.1\n\n # If we're displaying time as a clock instead of as units of whatever time our model\n # is working in, create a minute_display column that will display as a psuedo datetime\n \n # For now, it starts a few months after the current date, just to give the\n # idea of simulating some hypothetical future time. 
It might be nice to allow\n # the start point to be changed, particular if we're simulating something on\n # a larger timescale that includes a level of weekly or monthly seasonality.\n\n # We need to keep the original minute column in existance because it's important for sorting\n if time_display_units == \"dhm\":\n full_patient_df_plus_pos['minute'] = dt.date.today() + pd.DateOffset(days=165) + pd.TimedeltaIndex(full_patient_df_plus_pos['minute'], unit='m')\n # https://strftime.org/\n full_patient_df_plus_pos['minute_display'] = full_patient_df_plus_pos['minute'].apply(\n lambda x: dt.datetime.strftime(x, '%d %B %Y\\n%H:%M')\n )\n full_patient_df_plus_pos['minute'] = full_patient_df_plus_pos['minute'].apply(\n lambda x: dt.datetime.strftime(x, '%Y-%m-%d %H:%M')\n )\n else:\n full_patient_df_plus_pos['minute_display'] = full_patient_df_plus_pos['minute']\n\n # full_patient_df_plus_pos['size'] = 24\n\n # We are effectively making use of an animated plotly express scatterploy\n # to do all of the heavy lifting\n # Because of the way plots animate in this, it deals with all of the difficulty\n # of paths between individual positions - so we just have to tell it where to put\n # people at each defined step of the process, and the scattergraph will move them\n\n fig = px.scatter(\n full_patient_df_plus_pos.sort_values('minute'),\n x=\"x_final\",\n y=\"y_final\",\n # Each frame is one step of time, with the gap being determined\n # in the reshape_for_animation function\n animation_frame=\"minute_display\",\n # Important to group by patient here\n animation_group=\"patient\",\n text=\"icon\",\n # Can't have colours because it causes bugs with\n # lots of points failing to appear\n #color=\"event\",\n hover_name=\"event\",\n hover_data=[\"patient\", \"pathway\", \"time\", \"minute\", \"resource_id\"],\n # The approach of putting in the people as symbols didn't work\n # Went with making emoji text labels instead - this works better!\n # But leaving in as a reminder that the symbol approach doens't work.\n #symbol=\"rep\",\n #symbol_sequence=[\"⚽\"],\n #symbol_map=dict(rep_choice = \"⚽\"),\n range_x=[0, x_max],\n range_y=[0, y_max],\n height=plotly_height,\n width=plotly_width,\n # This sets the opacity of the points that sit behind\n opacity=0\n # size=\"size\"\n )\n\n # Now add labels identifying each stage (optional - can either be used\n # in conjunction with a background image or as a way to see stage names\n # without the need to create a background image)\n if display_stage_labels:\n fig.add_trace(go.Scatter(\n x=[pos+10 for pos in event_position_df['x'].to_list()],\n y=event_position_df['y'].to_list(),\n mode=\"text\",\n name=\"\",\n text=event_position_df['label'].to_list(),\n textposition=\"middle right\",\n hoverinfo='none'\n ))\n\n # Update the size of the icons and labels\n # This is what determines the size of the individual emojis that \n # represent our people!\n fig.update_traces(textfont_size=icon_and_text_size)\n\n # Finally add in icons to indicate the available resources\n # Make an additional dataframe that has one row per resource type\n # Then, starting from the initial position, make that many large circles\n # make them semi-transparent or you won't see the people using them! 
\n events_with_resources = event_position_df[event_position_df['resource'].notnull()].copy()\n events_with_resources['resource_count'] = events_with_resources['resource'].apply(lambda x: getattr(scenario, x))\n\n events_with_resources = events_with_resources.join(events_with_resources.apply(\n lambda r: pd.Series({'x_final': [r['x']-(10*(i+1)) for i in range(r['resource_count'])]}), axis=1).explode('x_final'),\n how='right')\n\n # This just adds an additional scatter trace that creates large dots\n # that represent the individual resources\n fig.add_trace(go.Scatter(\n x=events_with_resources['x_final'].to_list(),\n # Place these slightly below the y position for each entity\n # that will be using the resource\n y=[i-10 for i in events_with_resources['y'].to_list()],\n mode=\"markers\",\n # Define what the marker will look like\n marker=dict(\n color='LightSkyBlue',\n size=15),\n opacity=0.8,\n hoverinfo='none'\n ))\n\n # Optional step to add a background image\n # This can help to better visualise the layout/structure of a pathway\n # Simple FOSS tool for creating these background images is draw.io\n # Ideally your queueing steps should always be ABOVE your resource use steps\n # as this then results in people nicely flowing from the front of the queue \n # to the next stage\n if add_background_image is not None:\n fig.add_layout_image(\n dict(\n source=add_background_image,\n xref=\"x domain\",\n yref=\"y domain\",\n x=1,\n y=1,\n sizex=1,\n sizey=1,\n xanchor=\"right\",\n yanchor=\"top\",\n sizing=\"stretch\",\n opacity=0.5,\n layer=\"below\")\n )\n\n # We don't need any gridlines or tickmarks for the final output, so remove\n # However, can be useful for the initial setup phase of the outputs, so give the \n # option to inlcude\n if not setup_mode:\n fig.update_xaxes(showticklabels=False, showgrid=False, zeroline=False, \n # Prevent zoom\n fixedrange=True)\n fig.update_yaxes(showticklabels=False, showgrid=False, zeroline=False, \n # Prevent zoom\n fixedrange=True)\n\n fig.update_layout(yaxis_title=None, xaxis_title=None, showlegend=False,\n # Increase the size of the play button and animation timeline\n sliders=[dict(currentvalue=dict(font=dict(size=35) ,\n prefix=\"\"))]\n )\n\n # You can get rid of the play button if desired\n # Was more useful in older versions of the function\n if not include_play_button:\n fig[\"layout\"].pop(\"updatemenus\")\n\n # Adjust speed of animation\n fig.layout.updatemenus[0].buttons[0].args[1]['frame']['duration'] = frame_duration\n fig.layout.updatemenus[0].buttons[0].args[1]['transition']['duration'] = frame_transition_duration\n\n return fig" } ]
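For orientation, a minimal sketch of how the helpers defined in the context snippets above fit together; it reuses only names that appear in those snippets, the Scenario keyword arguments are copied from the application code later in this record, and all concrete values are illustrative assumptions rather than anything the record itself specifies.

# Illustrative sketch only; names come from the snippets above, values are assumed.
from distribution_classes import Normal
from model_classes import Scenario, multiple_replications

# Normal wraps a seeded numpy Generator around a mean/sigma pair.
consult_times = Normal(mean=50, sigma=10, random_seed=42).sample(size=5)

# Scenario keyword arguments mirror those used in the app code later in this record.
args = Scenario(
    random_number_set=42,
    n_cubicles_1=4,
    override_arrival_rate=True,
    manual_arrival_rate=60 / (120 / 24),
    model="simplest",
    trauma_treat_mean=50,
    trauma_treat_var=10,
)

# With return_detailed_logs=False (the default), multiple_replications returns a
# summary DataFrame indexed by 'rep'; rc_period is the simulated minutes to collect
# results over (here 10 days).
summary_df = multiple_replications(args, rc_period=10 * 60 * 24, n_reps=5)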
import asyncio
import gc
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import streamlit as st
from helper_functions import add_logo, mermaid, center_running
from model_classes import Scenario, multiple_replications
from distribution_classes import Normal
from output_animation_functions import reshape_for_animations, animate_activity_log
10,892
10, 300, step=5, value=120) with col2: consult_time = st.slider("⏱️ How long (in minutes) does a consultation take on average?", 5, 150, step=5, value=50) consult_time_sd = st.slider("🕔 🕣 How much (in minutes) does the time for a consultation usually vary by?", 5, 30, step=5, value=10) norm_dist = Normal(consult_time, consult_time_sd, random_seed=seed) norm_fig = px.histogram(norm_dist.sample(size=2500), height=150) norm_fig.update_layout(yaxis_title="", xaxis_title="Consultation Time<br>(Minutes)") norm_fig.update_xaxes(tick0=0, dtick=10, range=[0, # max(norm_dist.sample(size=2500)) 240 ]) norm_fig.layout.update(showlegend=False, margin=dict(l=0, r=0, t=0, b=0)) st.markdown("#### Consultation Time Distribution") st.plotly_chart(norm_fig, use_container_width=True, config = {'displayModeBar': False}) # A user must press a streamlit button to run the model button_run_pressed = st.button("Run simulation") if button_run_pressed: # add a spinner and then display success box with st.spinner('Simulating the minor injuries unit...'): args = Scenario( random_number_set=seed, n_cubicles_1=nurses, override_arrival_rate=True, manual_arrival_rate=60/(mean_arrivals_per_day/24), model="simplest", trauma_treat_mean=consult_time, trauma_treat_var=consult_time_sd ) await asyncio.sleep(0.1) # run multiple replications of experment detailed_outputs = multiple_replications( args, n_reps=n_reps, rc_period=run_time_days*60*24, return_detailed_logs=True ) results = pd.concat([detailed_outputs[i]['results']['summary_df'].assign(rep= i+1) for i in range(n_reps)]).set_index('rep') full_event_log = pd.concat([detailed_outputs[i]['results']['full_event_log'].assign(rep= i+1) for i in range(n_reps)]) del detailed_outputs gc.collect() animation_dfs_log = reshape_for_animations( full_event_log=full_event_log[ (full_event_log['rep']==1) & ((full_event_log['event_type']=='queue') | (full_event_log['event_type']=='resource_use') | (full_event_log['event_type']=='arrival_departure')) & # Limit to first 5 days (full_event_log['time'] <= 60*24*5) ], every_x_minutes=5 )['full_patient_df'] del full_event_log gc.collect() if button_run_pressed: tab1, tab2, tab3 = st.tabs( ["Animated Log", "Simple Graphs", "Advanced Graphs"] ) # st.markdown(""" # You can click on the three tabs below ("Animated Log", "Simple Graphs", and "Advanced Graphs") to view different outputs from the model. # """) with tab1: # st.write(results) st.subheader("Animated Model Output") with st.spinner('Generating the animated patient log...'): event_position_df = pd.DataFrame([ {'event': 'arrival', 'x': 50, 'y': 300, 'label': "Arrival" }, # Triage - minor and trauma {'event': 'treatment_wait_begins', 'x': 190, 'y': 170, 'label': "Waiting for Treatment" }, {'event': 'treatment_begins', 'x': 190, 'y': 110, 'resource':'n_cubicles_1', 'label': "Being Treated" }, {'event': 'exit', 'x': 270, 'y': 70, 'label': "Exit"} ]) st.markdown( """ The plot below shows a snapshot every 5 minutes of the position of everyone in our emergency department model. The buttons to the left of the slider below the plot can be used to start and stop the animation. Clicking on the bar below the plot and dragging your cursor to the left or right allows you to rapidly jump through to a different time in the simulation. Only the first replication of the simulation is shown. """ )
''' A Streamlit application based on Monks and Allows users to interact with an increasingly more complex treatment simulation ''' # Set page parameters st.set_page_config( page_title="Using a Simple Resource", layout="wide", initial_sidebar_state="expanded", ) # Add the logo add_logo() center_running() # Import the stylesheet with open("style.css") as css: st.markdown( f'<style>{css.read()}</style>' , unsafe_allow_html= True) st.title("Discrete Event Simulation Playground") st.subheader("Using a Simple Resource: Sending Patients to be Treated") gc.collect() # tab1, tab2, tab3 = st.tabs(["Introduction", "Exercise", "Playground"]) tab1, tab2, tab3 = st.tabs(["Playground", "Exercise", "Information"]) with tab3: st.markdown( """ Now, it's all well and good having patients arrive, but at the moment there is no-one and nowhere to see them! We need to add our first resource. Resources exist inside our simulation, and can be nurses, rooms, ambulances - whatever we need them to be. When someone reaches the front of the queue, they will be allocated to a resource that is currently free. They will hold onto this resource for as long as they need it, and then they will let go of it and move on to the next part of the process. This means resources can continue to be reused again and again in the system, unlike our arrivals. So for now, let's make it so that when someone arrives, they need to be treated, and to do this they will need a resource. For now, we're keeping it simple - let's assume each nurse has a room that they treat people in. They always stay in this room, and as soon as a patient has finished being treated, the patient will leave and the nurse (and room) will become available again. This means we just have one type of resource to worry about. """ ) mermaid(height=175, code= """ %%{ init: { 'flowchart': { 'curve': 'step' } } }%% %%{ init: { 'theme': 'base', 'themeVariables': {'lineColor': '#b4b4b4'} } }%% flowchart LR A[Arrival]----> B[Treatment] B -.-> C([Nurse/Cubicle\n<b>RESOURCE</b>]) C -.-> B B ----> F[Discharge] classDef default font-size:18pt,font-family:lexend; linkStyle default stroke:white; """ ) st.markdown( """ For now, we'll assume all of our patients are roughly equally injured - but there might still be some variation in how long it takes to treat them. Some might need a few stitches, some might just need a quick bit of advice. This time, we're going to sample from a different distribution - the normal distribution. A few people won't take very long to fix up, while a few might take quite a long time - but most of the people will take an amount of time that's somewhere in the middle. """) norm_dist_example = Normal(mean=50, sigma=10) norm_fig_example = px.histogram(norm_dist_example.sample(size=5000), height=300) norm_fig_example.update_layout(yaxis_title="", xaxis_title="Consultation Time<br>(Minutes)") norm_fig_example.layout.update(showlegend=False, margin=dict(l=0, r=0, t=0, b=0)) st.plotly_chart(norm_fig_example, use_container_width=True) st.markdown(""" We're going to start measuring a few more things now - how much of each resource's time is spent with patients **(known as resource utilisation)** - how long each patient waits before they get allocated a resource - what percentage of patients meet a target of being treated within 2 hours of turning up to our treatment centre """) with tab2: st.markdown( """ ### Things to Try Out - Try changing the sliders for consultation time and variation in consultation time. 
What happens to the shape of the graph below the sliders? --- - Put the consulation times back to the default (50 minutes length on average, 10 minutes of variation). Run the model and take a look at the animated flow of patients through the system. What do you notice about - the number of nurses in use? Do they ever get any breaks? - the size of the queue for treatment at different times - does it get bigger and smaller at different times, or just keep growing? --- - What happens when you play around with the number of nurses we have available? - Look at the queues, but look at the resource utilisation too. The resource utilisation tells us how much of the time each nurse is busy rather than waiting for a patient to turn up. - Can you find a middle ground where the nurse is being used a good amount without the queues building up? --- """) with st.expander("Click here for bonus exercises"): st.markdown( """ - What happens to the average utilisation and waits when you keep the number of nurses the same but change - the average length of time it takes each patient to be seen? - the variability in the length of time it takes each patient to be seen? """ ) with tab1: col1, col2 = st.columns(2) with col1: nurses = st.slider("👨‍⚕️👩‍⚕️ How Many Rooms/Nurses Are Available?", 1, 15, step=1, value=4) seed = st.slider("🎲 Set a random number for the computer to start from", 1, 1000, step=1, value=42) with st.expander("Previous Parameters"): st.markdown("If you like, you can edit these parameters too!") n_reps = st.slider("🔁 How many times should the simulation run?", 1, 30, step=1, value=6) run_time_days = st.slider("🗓️ How many days should we run the simulation for each time?", 1, 40, step=1, value=10) mean_arrivals_per_day = st.slider("🧍 How many patients should arrive per day on average?", 10, 300, step=5, value=120) with col2: consult_time = st.slider("⏱️ How long (in minutes) does a consultation take on average?", 5, 150, step=5, value=50) consult_time_sd = st.slider("🕔 🕣 How much (in minutes) does the time for a consultation usually vary by?", 5, 30, step=5, value=10) norm_dist = Normal(consult_time, consult_time_sd, random_seed=seed) norm_fig = px.histogram(norm_dist.sample(size=2500), height=150) norm_fig.update_layout(yaxis_title="", xaxis_title="Consultation Time<br>(Minutes)") norm_fig.update_xaxes(tick0=0, dtick=10, range=[0, # max(norm_dist.sample(size=2500)) 240 ]) norm_fig.layout.update(showlegend=False, margin=dict(l=0, r=0, t=0, b=0)) st.markdown("#### Consultation Time Distribution") st.plotly_chart(norm_fig, use_container_width=True, config = {'displayModeBar': False}) # A user must press a streamlit button to run the model button_run_pressed = st.button("Run simulation") if button_run_pressed: # add a spinner and then display success box with st.spinner('Simulating the minor injuries unit...'): args = Scenario( random_number_set=seed, n_cubicles_1=nurses, override_arrival_rate=True, manual_arrival_rate=60/(mean_arrivals_per_day/24), model="simplest", trauma_treat_mean=consult_time, trauma_treat_var=consult_time_sd ) await asyncio.sleep(0.1) # run multiple replications of experment detailed_outputs = multiple_replications( args, n_reps=n_reps, rc_period=run_time_days*60*24, return_detailed_logs=True ) results = pd.concat([detailed_outputs[i]['results']['summary_df'].assign(rep= i+1) for i in range(n_reps)]).set_index('rep') full_event_log = pd.concat([detailed_outputs[i]['results']['full_event_log'].assign(rep= i+1) for i in range(n_reps)]) del detailed_outputs gc.collect() 
animation_dfs_log = reshape_for_animations( full_event_log=full_event_log[ (full_event_log['rep']==1) & ((full_event_log['event_type']=='queue') | (full_event_log['event_type']=='resource_use') | (full_event_log['event_type']=='arrival_departure')) & # Limit to first 5 days (full_event_log['time'] <= 60*24*5) ], every_x_minutes=5 )['full_patient_df'] del full_event_log gc.collect() if button_run_pressed: tab1, tab2, tab3 = st.tabs( ["Animated Log", "Simple Graphs", "Advanced Graphs"] ) # st.markdown(""" # You can click on the three tabs below ("Animated Log", "Simple Graphs", and "Advanced Graphs") to view different outputs from the model. # """) with tab1: # st.write(results) st.subheader("Animated Model Output") with st.spinner('Generating the animated patient log...'): event_position_df = pd.DataFrame([ {'event': 'arrival', 'x': 50, 'y': 300, 'label': "Arrival" }, # Triage - minor and trauma {'event': 'treatment_wait_begins', 'x': 190, 'y': 170, 'label': "Waiting for Treatment" }, {'event': 'treatment_begins', 'x': 190, 'y': 110, 'resource':'n_cubicles_1', 'label': "Being Treated" }, {'event': 'exit', 'x': 270, 'y': 70, 'label': "Exit"} ]) st.markdown( """ The plot below shows a snapshot every 5 minutes of the position of everyone in our emergency department model. The buttons to the left of the slider below the plot can be used to start and stop the animation. Clicking on the bar below the plot and dragging your cursor to the left or right allows you to rapidly jump through to a different time in the simulation. Only the first replication of the simulation is shown. """ )
st.plotly_chart(animate_activity_log(
7
2023-10-26 09:57:52+00:00
16k
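The record above ends with next_line `st.plotly_chart(animate_activity_log(`. A plausible continuation, assembled from the animate_activity_log signature in the context and the variables built in the all_code field (animation_dfs_log, event_position_df, args), might read as follows; the exact arguments in the source repository may differ.

# Hypothetical completion of the record's next_line; argument names are taken from
# the animate_activity_log signature and the all_code field, not from the gold answer.
st.plotly_chart(
    animate_activity_log(
        full_patient_df=animation_dfs_log,    # reshaped event log (first replication only)
        event_position_df=event_position_df,  # x/y/label anchor for each event
        scenario=args,                        # supplies the n_cubicles_1 resource count
        rep=1,
    ),
    use_container_width=True,
)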
hyperspy/exspy
exspy/models/edsmodel.py
[ { "identifier": "_get_element_and_line", "path": "exspy/misc/eds/utils.py", "snippet": "def _get_element_and_line(xray_line):\n \"\"\"\n Returns the element name and line character for a particular X-ray line as\n a tuple.\n\n By example, if xray_line = 'Mn_Ka' this function returns ('Mn', 'Ka')\n \"\"\"\n lim = xray_line.find(\"_\")\n if lim == -1:\n raise ValueError(f\"Invalid xray-line: {xray_line}\")\n return xray_line[:lim], xray_line[lim + 1 :]" }, { "identifier": "EDSSpectrum", "path": "exspy/signals/eds.py", "snippet": "class EDSSpectrum(Signal1D):\n \"\"\"General signal class for EDS spectra.\"\"\"\n\n _signal_type = \"EDS\"\n\n def __init__(self, *args, **kwards):\n super().__init__(*args, **kwards)\n if self.metadata.Signal.signal_type == \"EDS\":\n warnings.warn(\n \"The microscope type is not set. Use \"\n \"set_signal_type('EDS_TEM') \"\n \"or set_signal_type('EDS_SEM')\"\n )\n self.axes_manager.signal_axes[0].is_binned = True\n self._xray_markers = {}\n\n def _get_line_energy(self, Xray_line, FWHM_MnKa=None):\n \"\"\"\n Get the line energy and the energy resolution of a Xray line.\n\n The return values are in the same units than the signal axis\n\n Parameters\n ----------\n Xray_line : strings\n Valid element X-ray lines e.g. Fe_Kb\n FWHM_MnKa: {None, float, 'auto'}\n The energy resolution of the detector in eV\n if 'auto', used the one in\n 'self.metadata.Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa'\n\n Returns\n -------\n float: the line energy, if FWHM_MnKa is None\n (float,float): the line energy and the energy resolution, if FWHM_MnKa\n is not None\n \"\"\"\n\n units_name = self.axes_manager.signal_axes[0].units\n\n if FWHM_MnKa == \"auto\":\n if self.metadata.Signal.signal_type == \"EDS_SEM\":\n FWHM_MnKa = (\n self.metadata.Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa\n )\n elif self.metadata.Signal.signal_type == \"EDS_TEM\":\n FWHM_MnKa = (\n self.metadata.Acquisition_instrument.TEM.Detector.EDS.energy_resolution_MnKa\n )\n else:\n raise NotImplementedError(\n \"This method only works for EDS_TEM or EDS_SEM signals. \"\n \"You can use `set_signal_type('EDS_TEM')` or\"\n \"`set_signal_type('EDS_SEM')` to convert to one of these\"\n \"signal types.\"\n )\n line_energy = utils_eds._get_energy_xray_line(Xray_line)\n if units_name == \"eV\":\n line_energy *= 1000\n if FWHM_MnKa is not None:\n line_FWHM = (\n utils_eds.get_FWHM_at_Energy(FWHM_MnKa, line_energy / 1000) * 1000\n )\n elif units_name == \"keV\":\n if FWHM_MnKa is not None:\n line_FWHM = utils_eds.get_FWHM_at_Energy(FWHM_MnKa, line_energy)\n else:\n raise ValueError(\n f\"{units_name} is not a valid units for the energy axis. \"\n \"Only `eV` and `keV` are supported. \"\n \"If `s` is the variable containing this EDS spectrum:\\n \"\n \">>> s.axes_manager.signal_axes[0].units = 'keV' \\n\"\n )\n if FWHM_MnKa is None:\n return line_energy\n else:\n return line_energy, line_FWHM\n\n def _get_beam_energy(self):\n \"\"\"\n Get the beam energy.\n\n The return value is in the same units than the signal axis\n \"\"\"\n\n if \"Acquisition_instrument.SEM.beam_energy\" in self.metadata:\n beam_energy = self.metadata.Acquisition_instrument.SEM.beam_energy\n elif \"Acquisition_instrument.TEM.beam_energy\" in self.metadata:\n beam_energy = self.metadata.Acquisition_instrument.TEM.beam_energy\n else:\n raise AttributeError(\n \"The beam energy is not defined in `metadata`. 
\"\n \"Use `set_microscope_parameters` to set it.\"\n )\n\n units_name = self.axes_manager.signal_axes[0].units\n\n if units_name == \"eV\":\n beam_energy *= 1000\n return beam_energy\n\n def _get_xray_lines_in_spectral_range(self, xray_lines):\n \"\"\"\n Return the lines in the energy range\n\n Parameters\n ----------\n xray_lines: List of string\n The xray_lines\n\n Return\n ------\n The list of xray_lines in the energy range\n \"\"\"\n ax = self.axes_manager.signal_axes[0]\n low_value = ax.low_value\n high_value = ax.high_value\n try:\n if self._get_beam_energy() < high_value:\n high_value = self._get_beam_energy()\n except AttributeError:\n # in case the beam energy is not defined in the metadata\n pass\n xray_lines_in_range = []\n xray_lines_not_in_range = []\n for xray_line in xray_lines:\n line_energy = self._get_line_energy(xray_line)\n if low_value < line_energy < high_value:\n xray_lines_in_range.append(xray_line)\n else:\n xray_lines_not_in_range.append(xray_line)\n return xray_lines_in_range, xray_lines_not_in_range\n\n def sum(self, axis=None, out=None, rechunk=False):\n if axis is None:\n axis = self.axes_manager.navigation_axes\n s = super().sum(axis=axis, out=out, rechunk=rechunk)\n s = out or s\n\n # Update live time by the change in navigation axes dimensions\n time_factor = np.prod(\n [ax.size for ax in self.axes_manager.navigation_axes]\n ) / np.prod([ax.size for ax in s.axes_manager.navigation_axes])\n aimd = s.metadata.get_item(\"Acquisition_instrument\", None)\n if aimd is not None:\n aimd = s.metadata.Acquisition_instrument\n if \"SEM.Detector.EDS.live_time\" in aimd:\n aimd.SEM.Detector.EDS.live_time *= time_factor\n elif \"TEM.Detector.EDS.live_time\" in aimd:\n aimd.TEM.Detector.EDS.live_time *= time_factor\n else:\n _logger.info(\n \"Live_time could not be found in the metadata and \"\n \"has not been updated.\"\n )\n\n if out is None:\n return s\n\n sum.__doc__ = Signal1D.sum.__doc__\n\n def rebin(self, new_shape=None, scale=None, crop=True, dtype=None, out=None):\n factors = self._validate_rebin_args_and_get_factors(\n new_shape=new_shape,\n scale=scale,\n )\n m = super().rebin(\n new_shape=new_shape, scale=scale, crop=crop, dtype=dtype, out=out\n )\n m = out or m\n time_factor = np.prod(\n [factors[axis.index_in_array] for axis in m.axes_manager.navigation_axes]\n )\n aimd = m.metadata.Acquisition_instrument\n if \"Acquisition_instrument.SEM.Detector.EDS.real_time\" in m.metadata:\n aimd.SEM.Detector.EDS.real_time *= time_factor\n elif \"Acquisition_instrument.TEM.Detector.EDS.real_time\" in m.metadata:\n aimd.TEM.Detector.EDS.real_time *= time_factor\n else:\n _logger.info(\n \"real_time could not be found in the metadata and has not been updated.\"\n )\n if \"Acquisition_instrument.SEM.Detector.EDS.live_time\" in m.metadata:\n aimd.SEM.Detector.EDS.live_time *= time_factor\n elif \"Acquisition_instrument.TEM.Detector.EDS.live_time\" in m.metadata:\n aimd.TEM.Detector.EDS.live_time *= time_factor\n else:\n _logger.info(\n \"Live_time could not be found in the metadata and has not been updated.\"\n )\n\n if out is None:\n return m\n else:\n out.events.data_changed.trigger(obj=out)\n return m\n\n rebin.__doc__ = BaseSignal.rebin.__doc__\n\n def set_elements(self, elements):\n \"\"\"Erase all elements and set them.\n\n Parameters\n ----------\n elements : list of strings\n A list of chemical element symbols.\n\n See also\n --------\n add_elements, set_lines, add_lines\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> 
print(s.metadata.Sample.elements)\n >>> s.set_elements(['Al'])\n >>> print(s.metadata.Sample.elements)\n ['Al' 'C' 'Cu' 'Mn' 'Zr']\n ['Al']\n\n \"\"\"\n # Erase previous elements and X-ray lines\n if \"Sample.elements\" in self.metadata:\n del self.metadata.Sample.elements\n self.add_elements(elements)\n\n def add_elements(self, elements):\n \"\"\"Add elements and the corresponding X-ray lines.\n\n The list of elements is stored in `metadata.Sample.elements`\n\n Parameters\n ----------\n elements : list of strings\n The symbol of the elements.\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> print(s.metadata.Sample.elements)\n >>> s.add_elements(['Ar'])\n >>> print(s.metadata.Sample.elements)\n ['Al' 'C' 'Cu' 'Mn' 'Zr']\n ['Al', 'Ar', 'C', 'Cu', 'Mn', 'Zr']\n\n See also\n --------\n set_elements, add_lines, set_lines\n\n \"\"\"\n if not isiterable(elements) or isinstance(elements, str):\n raise ValueError(\n \"Input must be in the form of a list. For example, \"\n \"if `s` is the variable containing this EDS spectrum:\\n \"\n \">>> s.add_elements(('C',))\\n\"\n \"See the docstring for more information.\"\n )\n if \"Sample.elements\" in self.metadata:\n elements_ = set(self.metadata.Sample.elements)\n else:\n elements_ = set()\n for element in elements:\n if element in elements_db:\n elements_.add(element)\n else:\n raise ValueError(f\"{element} is not a valid chemical element symbol.\")\n self.metadata.set_item(\"Sample.elements\", sorted(list(elements_)))\n\n def _get_xray_lines(self, xray_lines=None, only_one=None, only_lines=(\"a\",)):\n if xray_lines is None:\n if \"Sample.xray_lines\" in self.metadata:\n xray_lines = self.metadata.Sample.xray_lines\n elif \"Sample.elements\" in self.metadata:\n xray_lines = self._get_lines_from_elements(\n self.metadata.Sample.elements,\n only_one=only_one,\n only_lines=only_lines,\n )\n else:\n raise ValueError(\"Not X-ray line, set them with `add_elements`.\")\n return xray_lines\n\n def set_lines(self, lines, only_one=True, only_lines=(\"a\",)):\n \"\"\"Erase all Xrays lines and set them.\n\n See add_lines for details.\n\n Parameters\n ----------\n lines : list of strings\n A list of valid element X-ray lines to add e.g. Fe_Kb.\n Additionally, if `metadata.Sample.elements` is\n defined, add the lines of those elements that where not\n given in this list.\n only_one: bool\n If False, add all the lines of each element in\n `metadata.Sample.elements` that has not line\n defined in lines. 
If True (default),\n only add the line at the highest energy\n above an overvoltage of 2 (< beam energy / 2).\n only_lines : {None, list of strings}\n If not None, only the given lines will be added.\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.add_lines()\n >>> print(s.metadata.Sample.xray_lines)\n >>> s.set_lines(['Cu_Ka'])\n >>> print(s.metadata.Sample.xray_lines)\n ['Al_Ka', 'C_Ka', 'Cu_La', 'Mn_La', 'Zr_La']\n ['Al_Ka', 'C_Ka', 'Cu_Ka', 'Mn_La', 'Zr_La']\n\n See also\n --------\n add_lines, add_elements, set_elements\n\n \"\"\"\n only_lines = utils_eds._parse_only_lines(only_lines)\n if \"Sample.xray_lines\" in self.metadata:\n del self.metadata.Sample.xray_lines\n self.add_lines(lines=lines, only_one=only_one, only_lines=only_lines)\n\n def add_lines(self, lines=(), only_one=True, only_lines=(\"a\",)):\n \"\"\"Add X-rays lines to the internal list.\n\n Although most functions do not require an internal list of\n X-ray lines because they can be calculated from the internal\n list of elements, ocassionally it might be useful to customize the\n X-ray lines to be use by all functions by default using this method.\n The list of X-ray lines is stored in\n `metadata.Sample.xray_lines`\n\n Parameters\n ----------\n lines : list of strings\n A list of valid element X-ray lines to add e.g. Fe_Kb.\n Additionally, if `metadata.Sample.elements` is\n defined, add the lines of those elements that where not\n given in this list. If the list is empty (default), and\n `metadata.Sample.elements` is\n defined, add the lines of all those elements.\n only_one: bool\n If False, add all the lines of each element in\n `metadata.Sample.elements` that has not line\n defined in lines. If True (default),\n only add the line at the highest energy\n above an overvoltage of 2 (< beam energy / 2).\n only_lines : {None, list of strings}\n If not None, only the given lines will be added.\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.add_lines()\n >>> print(s.metadata.Sample.xray_lines)\n ['Al_Ka', 'C_Ka', 'Cu_La', 'Mn_La', 'Zr_La']\n\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.set_microscope_parameters(beam_energy=30)\n >>> s.add_lines()\n >>> print(s.metadata.Sample.xray_lines)\n ['Al_Ka', 'C_Ka', 'Cu_Ka', 'Mn_Ka', 'Zr_La']\n\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.add_lines()\n >>> print(s.metadata.Sample.xray_lines)\n >>> s.add_lines(['Cu_Ka'])\n >>> print(s.metadata.Sample.xray_lines)\n ['Al_Ka', 'C_Ka', 'Cu_La', 'Mn_La', 'Zr_La']\n ['Al_Ka', 'C_Ka', 'Cu_Ka', 'Cu_La', 'Mn_La', 'Zr_La']\n\n See also\n --------\n set_lines, add_elements, set_elements\n\n \"\"\"\n only_lines = utils_eds._parse_only_lines(only_lines)\n if \"Sample.xray_lines\" in self.metadata:\n xray_lines = set(self.metadata.Sample.xray_lines)\n else:\n xray_lines = set()\n # Define the elements which Xray lines has been customized\n # So that we don't attempt to add new lines automatically\n elements = set()\n for line in xray_lines:\n elements.add(line.split(\"_\")[0])\n for line in lines:\n try:\n element, subshell = line.split(\"_\")\n except ValueError:\n raise ValueError(\n \"Invalid line symbol. \"\n \"Please provide a valid line symbol e.g. 
Fe_Ka\"\n )\n if element in elements_db:\n elements.add(element)\n if subshell in elements_db[element][\"Atomic_properties\"][\"Xray_lines\"]:\n lines_len = len(xray_lines)\n xray_lines.add(line)\n if lines_len != len(xray_lines):\n _logger.info(f\"{line} line added,\")\n else:\n _logger.info(f\"{line} line already in.\")\n else:\n raise ValueError(f\"{line} is not a valid line of {element}.\")\n else:\n raise ValueError(f\"{element} is not a valid symbol of an element.\")\n xray_not_here = self._get_xray_lines_in_spectral_range(xray_lines)[1]\n for xray in xray_not_here:\n warnings.warn(f\"{xray} is not in the data energy range.\", UserWarning)\n if \"Sample.elements\" in self.metadata:\n extra_elements = set(self.metadata.Sample.elements) - elements\n if extra_elements:\n new_lines = self._get_lines_from_elements(\n extra_elements, only_one=only_one, only_lines=only_lines\n )\n if new_lines:\n self.add_lines(list(new_lines) + list(lines))\n self.add_elements(elements)\n if not hasattr(self.metadata, \"Sample\"):\n self.metadata.add_node(\"Sample\")\n if \"Sample.xray_lines\" in self.metadata:\n xray_lines = xray_lines.union(self.metadata.Sample.xray_lines)\n self.metadata.Sample.xray_lines = sorted(list(xray_lines))\n\n def _get_lines_from_elements(self, elements, only_one=False, only_lines=(\"a\",)):\n \"\"\"Returns the X-ray lines of the given elements in spectral range\n of the data.\n\n Parameters\n ----------\n elements : list of strings\n A list containing the symbol of the chemical elements.\n only_one : bool\n If False, add all the lines of each element in the data spectral\n range. If True only add the line at the highest energy\n above an overvoltage of 2 (< beam energy / 2).\n only_lines : {None, list of strings}\n If not None, only the given lines will be returned.\n\n Returns\n -------\n list of X-ray lines alphabetically sorted\n\n \"\"\"\n\n only_lines = utils_eds._parse_only_lines(only_lines)\n try:\n beam_energy = self._get_beam_energy()\n except BaseException:\n # Fall back to the high_value of the energy axis\n beam_energy = self.axes_manager.signal_axes[0].high_value\n lines = []\n elements = [el if isinstance(el, str) else el.decode() for el in elements]\n for element in elements:\n # Possible line (existing and excited by electron)\n element_lines = []\n for subshell in list(\n elements_db[element][\"Atomic_properties\"][\"Xray_lines\"].keys()\n ):\n if only_lines and subshell not in only_lines:\n continue\n element_lines.append(element + \"_\" + subshell)\n element_lines = self._get_xray_lines_in_spectral_range(element_lines)[0]\n if only_one and element_lines:\n # Choose the best line\n select_this = -1\n element_lines.sort()\n for i, line in enumerate(element_lines):\n if self._get_line_energy(line) < beam_energy / 2:\n select_this = i\n break\n element_lines = [\n element_lines[select_this],\n ]\n\n if not element_lines:\n _logger.info(\n f\"There is no X-ray line for element {element} \"\n \"in the data spectral range\"\n )\n else:\n lines.extend(element_lines)\n lines.sort()\n return lines\n\n def _parse_xray_lines(self, xray_lines, only_one, only_lines):\n only_lines = utils_eds._parse_only_lines(only_lines)\n xray_lines = self._get_xray_lines(\n xray_lines, only_one=only_one, only_lines=only_lines\n )\n xray_lines, xray_not_here = self._get_xray_lines_in_spectral_range(xray_lines)\n for xray in xray_not_here:\n warnings.warn(\n f\"{xray} is not in the data energy range. 
\"\n \"You can remove it with: \"\n f\"`s.metadata.Sample.xray_lines.remove('{xray}')`\"\n )\n return xray_lines\n\n def get_lines_intensity(\n self,\n xray_lines=None,\n integration_windows=2.0,\n background_windows=None,\n plot_result=False,\n only_one=True,\n only_lines=(\"a\",),\n **kwargs,\n ):\n \"\"\"Return the intensity map of selected Xray lines.\n\n The intensities, the number of X-ray counts, are computed by\n suming the spectrum over the\n different X-ray lines. The sum window width\n is calculated from the energy resolution of the detector\n as defined in 'energy_resolution_MnKa' of the metadata.\n Backgrounds average in provided windows can be subtracted from the\n intensities.\n\n Parameters\n ----------\n xray_lines: {None, Iterable* of strings}\n If None,\n if `metadata.Sample.elements.xray_lines` contains a\n list of lines use those.\n If `metadata.Sample.elements.xray_lines` is undefined\n or empty but `metadata.Sample.elements` is defined,\n use the same syntax as `add_line` to select a subset of lines\n for the operation.\n Alternatively, provide an iterable containing\n a list of valid X-ray lines symbols.\n * Note that while dictionaries and strings are iterable,\n their use is ambiguous and specifically not allowed.\n integration_windows: Float or array\n If float, the width of the integration windows is the\n 'integration_windows_width' times the calculated FWHM of the line.\n Else provide an array for which each row corresponds to a X-ray\n line. Each row contains the left and right value of the window.\n background_windows: None or 2D array of float\n If None, no background subtraction. Else, the backgrounds average\n in the windows are subtracted from the return intensities.\n 'background_windows' provides the position of the windows in\n energy. Each line corresponds to a X-ray line. In a line, the two\n first values correspond to the limits of the left window and the\n two last values correspond to the limits of the right window.\n plot_result : bool\n If True, plot the calculated line intensities. If the current\n object is a single spectrum it prints the result instead.\n only_one : bool\n If False, use all the lines of each element in the data spectral\n range. If True use only the line at the highest energy\n above an overvoltage of 2 (< beam energy / 2).\n only_lines : {None, list of strings}\n If not None, use only the given lines.\n kwargs\n The extra keyword arguments for plotting. 
See\n `utils.plot.plot_signals`\n\n Returns\n -------\n intensities : list\n A list containing the intensities as BaseSignal subclasses.\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.get_lines_intensity(['Mn_Ka'], plot_result=True)\n Mn_La at 0.63316 keV : Intensity = 96700.00\n\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.plot(['Mn_Ka'], integration_windows=2.1)\n >>> s.get_lines_intensity(['Mn_Ka'],\n >>> integration_windows=2.1, plot_result=True)\n Mn_Ka at 5.8987 keV : Intensity = 53597.00\n\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.set_elements(['Mn'])\n >>> s.set_lines(['Mn_Ka'])\n >>> bw = s.estimate_background_windows()\n >>> s.plot(background_windows=bw)\n >>> s.get_lines_intensity(background_windows=bw, plot_result=True)\n Mn_Ka at 5.8987 keV : Intensity = 46716.00\n\n See also\n --------\n set_elements, add_elements, estimate_background_windows,\n plot\n\n \"\"\"\n if xray_lines is not None and (\n not isinstance(xray_lines, Iterable) or isinstance(xray_lines, (str, dict))\n ):\n raise TypeError(\n \"xray_lines must be a compatible iterable, but was \"\n f\"mistakenly provided as a {type(xray_lines)}.\"\n )\n\n xray_lines = self._parse_xray_lines(xray_lines, only_one, only_lines)\n if hasattr(integration_windows, \"__iter__\") is False:\n integration_windows = self.estimate_integration_windows(\n windows_width=integration_windows, xray_lines=xray_lines\n )\n intensities = []\n ax = self.axes_manager.signal_axes[0]\n # test Signal1D (0D problem)\n # signal_to_index = self.axes_manager.navigation_dimension - 2\n for i, (Xray_line, window) in enumerate(zip(xray_lines, integration_windows)):\n element, line = utils_eds._get_element_and_line(Xray_line)\n line_energy = self._get_line_energy(Xray_line)\n # Replace with `map` function for lazy large datasets\n img = self.isig[window[0] : window[1]].integrate1D(\n -1\n ) # integrate over window.\n if np.issubdtype(img.data.dtype, np.integer):\n # The operations below require a float dtype with the default\n # numpy casting rule ('same_kind')\n img.change_dtype(\"float\")\n if background_windows is not None:\n bw = background_windows[i]\n # TODO: test to prevent slicing bug. To be reomved when fixed\n indexes = [float(ax.value2index(de)) for de in list(bw) + window]\n if indexes[0] == indexes[1]:\n bck1 = self.isig[bw[0]]\n else:\n bck1 = self.isig[bw[0] : bw[1]].integrate1D(-1)\n if indexes[2] == indexes[3]:\n bck2 = self.isig[bw[2]]\n else:\n bck2 = self.isig[bw[2] : bw[3]].integrate1D(-1)\n corr_factor = (indexes[5] - indexes[4]) / (\n (indexes[1] - indexes[0]) + (indexes[3] - indexes[2])\n )\n img = img - (bck1 + bck2) * corr_factor\n img.metadata.General.title = (\n f\"X-ray line intensity of {self.metadata.General.title}: \"\n f\"{Xray_line} at {line_energy:.2f} \"\n f\"{self.axes_manager.signal_axes[0].units}\"\n )\n img = img.transpose(signal_axes=[])\n if plot_result and img.axes_manager.navigation_size == 1:\n if img._lazy:\n img.compute()\n print(\n f\"{Xray_line} at {line_energy} {ax.units} : \"\n f\"Intensity = {img.data[0]:.2f}\"\n )\n img.metadata.set_item(\"Sample.elements\", ([element]))\n img.metadata.set_item(\"Sample.xray_lines\", ([Xray_line]))\n intensities.append(img)\n if plot_result and img.axes_manager.navigation_size != 1:\n utils.plot.plot_signals(intensities, **kwargs)\n return intensities\n\n def get_take_off_angle(self):\n \"\"\"Calculate the take-off-angle (TOA).\n\n TOA is the angle with which the X-rays leave the surface towards\n the detector. 
Parameters are read in 'SEM.Stage.tilt_alpha',\n 'Acquisition_instrument.SEM.Detector.EDS.azimuth_angle' and\n 'SEM.Detector.EDS.elevation_angle' and 'SEM.Stage.tilt_beta in\n 'metadata'.\n\n Returns\n -------\n take_off_angle: float\n in Degree\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.get_take_off_angle()\n 37.0\n >>> s.set_microscope_parameters(tilt_stage=20.)\n >>> s.get_take_off_angle()\n 57.0\n\n See also\n --------\n hs.eds.take_off_angle\n \"\"\"\n if self.metadata.Signal.signal_type == \"EDS_SEM\":\n mp = self.metadata.Acquisition_instrument.SEM\n elif self.metadata.Signal.signal_type == \"EDS_TEM\":\n mp = self.metadata.Acquisition_instrument.TEM\n\n tilt_stage = mp.get_item(\"Stage.tilt_alpha\", None)\n azimuth_angle = mp.get_item(\"Detector.EDS.azimuth_angle\", None)\n elevation_angle = mp.get_item(\"Detector.EDS.elevation_angle\", None)\n beta_tilt = mp.get_item(\"Stage.tilt_beta\", 0.0)\n\n return utils_eds.take_off_angle(\n tilt_stage, azimuth_angle, elevation_angle, beta_tilt\n )\n\n def estimate_integration_windows(self, windows_width=2.0, xray_lines=None):\n \"\"\"\n Estimate a window of integration for each X-ray line.\n\n Parameters\n ----------\n windows_width: float\n The width of the integration windows is the 'windows_width' times\n the calculated FWHM of the line.\n xray_lines: None or list of string\n If None, use 'metadata.Sample.elements.xray_lines'. Else,\n provide an iterable containing a list of valid X-ray lines\n symbols.\n\n Return\n ------\n integration_windows: 2D array of float\n The positions of the windows in energy. Each row corresponds to a\n X-ray line. Each row contains the left and right value of the\n window.\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.add_lines()\n >>> iw = s.estimate_integration_windows()\n >>> s.plot(integration_windows=iw)\n >>> s.get_lines_intensity(integration_windows=iw, plot_result=True)\n Fe_Ka at 6.4039 keV : Intensity = 3710.00\n Pt_La at 9.4421 keV : Intensity = 15872.00\n\n See also\n --------\n plot, get_lines_intensity\n \"\"\"\n xray_lines = self._get_xray_lines(xray_lines)\n integration_windows = []\n for Xray_line in xray_lines:\n line_energy, line_FWHM = self._get_line_energy(Xray_line, FWHM_MnKa=\"auto\")\n element, line = utils_eds._get_element_and_line(Xray_line)\n det = windows_width * line_FWHM / 2.0\n integration_windows.append([line_energy - det, line_energy + det])\n return integration_windows\n\n def estimate_background_windows(\n self, line_width=[2, 2], windows_width=1, xray_lines=None\n ):\n \"\"\"\n Estimate two windows around each X-ray line containing only the\n background.\n\n Parameters\n ----------\n line_width: list of two floats\n The position of the two windows around the X-ray line is given by\n the `line_width` (left and right) times the calculated FWHM of the\n line.\n windows_width: float\n The width of the windows is is the `windows_width` times the\n calculated FWHM of the line.\n xray_lines: None or list of string\n If None, use `metadata.Sample.elements.xray_lines`. Else,\n provide an iterable containing a list of valid X-ray lines\n symbols.\n\n Return\n ------\n windows_position: 2D array of float\n The position of the windows in energy. Each line corresponds to a\n X-ray line. 
In a line, the two first values correspond to the\n limits of the left window and the two last values correspond to\n the limits of the right window.\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.add_lines()\n >>> bw = s.estimate_background_windows(line_width=[5.0, 2.0])\n >>> s.plot(background_windows=bw)\n >>> s.get_lines_intensity(background_windows=bw, plot_result=True)\n Fe_Ka at 6.4039 keV : Intensity = 2754.00\n Pt_La at 9.4421 keV : Intensity = 15090.00\n\n See also\n --------\n plot, get_lines_intensity\n \"\"\"\n xray_lines = self._get_xray_lines(xray_lines)\n windows_position = []\n for xray_line in xray_lines:\n line_energy, line_FWHM = self._get_line_energy(xray_line, FWHM_MnKa=\"auto\")\n tmp = [\n line_energy - line_FWHM * line_width[0] - line_FWHM * windows_width,\n line_energy - line_FWHM * line_width[0],\n line_energy + line_FWHM * line_width[1],\n line_energy + line_FWHM * line_width[1] + line_FWHM * windows_width,\n ]\n windows_position.append(tmp)\n windows_position = np.array(windows_position)\n # merge ovelapping windows\n index = windows_position.argsort(axis=0)[:, 0]\n for i in range(len(index) - 1):\n ia, ib = index[i], index[i + 1]\n if windows_position[ia, 2] > windows_position[ib, 0]:\n interv = np.append(windows_position[ia, :2], windows_position[ib, 2:])\n windows_position[ia] = interv\n windows_position[ib] = interv\n return windows_position\n\n def plot(\n self,\n xray_lines=False,\n only_lines=(\"a\", \"b\"),\n only_one=False,\n background_windows=None,\n integration_windows=None,\n navigator=\"auto\",\n plot_markers=True,\n autoscale=\"v\",\n norm=\"auto\",\n axes_manager=None,\n navigator_kwds={},\n **kwargs,\n ):\n \"\"\"Plot the EDS spectrum. The following markers can be added\n\n - The position of the X-ray lines and their names.\n - The background windows associated with each X-ray lines. A black line\n links the left and right window with the average value in each window.\n\n Parameters\n ----------\n xray_lines: {False, True, 'from_elements', list of string}\n If not False, indicate the position and the name of the X-ray\n lines.\n If True, if `metadata.Sample.elements.xray_lines` contains a\n list of lines use those. If `metadata.Sample.elements.xray_lines`\n is undefined or empty or if xray_lines equals 'from_elements' and\n `metadata.Sample.elements` is defined, use the same syntax as\n `add_line` to select a subset of lines for the operation.\n Alternatively, provide an iterable containing a list of valid X-ray\n lines symbols.\n only_lines : None or list of strings\n If not None, use only the given lines (eg. ('a','Kb')).\n If None, use all lines.\n only_one : bool\n If False, use all the lines of each element in the data spectral\n range. If True use only the line at the highest energy\n above an overvoltage of 2 (< beam energy / 2).\n background_windows: None or 2D array of float\n If not None, add markers at the position of the windows in energy.\n Each line corresponds to a X-ray lines. In a line, the two first\n value corresponds to the limit of the left window and the two\n last values corresponds to the limit of the right window.\n integration_windows: None or 'auto' or float or 2D array of float\n If not None, add markers at the position of the integration\n windows.\n If 'auto' (or float), the width of the integration windows is 2.0\n (or float) times the calculated FWHM of the line. see\n 'estimate_integration_windows'.\n Else provide an array for which each row corresponds to a X-ray\n line. 
Each row contains the left and right value of the window.\n %s\n %s\n\n Examples\n --------\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.plot()\n\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.plot(True)\n\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.add_lines()\n >>> bw = s.estimate_background_windows()\n >>> s.plot(background_windows=bw)\n\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.plot(['Mn_Ka'], integration_windows='auto')\n\n >>> s = exspy.data.EDS_SEM_TM002()\n >>> s.add_lines()\n >>> bw = s.estimate_background_windows()\n >>> s.plot(background_windows=bw, integration_windows=2.1)\n\n See also\n --------\n set_elements, add_elements, estimate_integration_windows,\n get_lines_intensity, estimate_background_windows\n \"\"\"\n super().plot(\n navigator=navigator,\n plot_markers=plot_markers,\n autoscale=autoscale,\n norm=norm,\n axes_manager=axes_manager,\n navigator_kwds=navigator_kwds,\n **kwargs,\n )\n self._plot_xray_lines(\n xray_lines,\n only_lines,\n only_one,\n background_windows,\n integration_windows,\n render_figure=False,\n )\n self._render_figure(plot=[\"signal_plot\"])\n\n plot.__doc__ %= (BASE_PLOT_DOCSTRING_PARAMETERS, PLOT1D_DOCSTRING)\n\n def _plot_xray_lines(\n self,\n xray_lines=False,\n only_lines=(\"a\", \"b\"),\n only_one=False,\n background_windows=None,\n integration_windows=None,\n render_figure=True,\n ):\n if (\n xray_lines is not False\n or background_windows is not None\n or integration_windows is not None\n ):\n if xray_lines is False:\n xray_lines = True\n only_lines = utils_eds._parse_only_lines(only_lines)\n if xray_lines is True or xray_lines == \"from_elements\":\n if (\n \"Sample.xray_lines\" in self.metadata\n and xray_lines != \"from_elements\"\n ):\n xray_lines = self.metadata.Sample.xray_lines\n elif \"Sample.elements\" in self.metadata:\n xray_lines = self._get_lines_from_elements(\n self.metadata.Sample.elements,\n only_one=only_one,\n only_lines=only_lines,\n )\n else:\n _logger.warning(\"No elements defined, set them with `add_elements`\")\n # No X-rays lines, nothing to do then\n return\n\n xray_lines, xray_not_here = self._get_xray_lines_in_spectral_range(\n xray_lines\n )\n for xray in xray_not_here:\n _logger.warning(f\"{xray} is not in the data energy range.\")\n\n xray_lines = np.unique(xray_lines)\n\n self.add_xray_lines_markers(xray_lines, render_figure=False)\n if background_windows is not None:\n self._add_background_windows_markers(\n background_windows, render_figure=False\n )\n if integration_windows is not None:\n if integration_windows == \"auto\":\n integration_windows = 2.0\n if hasattr(integration_windows, \"__iter__\") is False:\n integration_windows = self.estimate_integration_windows(\n windows_width=integration_windows, xray_lines=xray_lines\n )\n self._add_vertical_lines_groups(\n integration_windows, linestyle=\"--\", render_figure=False\n )\n # Render figure only at the end\n if render_figure:\n self._render_figure(plot=[\"signal_plot\"])\n\n def _add_vertical_lines_groups(self, position, render_figure=True, **kwargs):\n \"\"\"\n Add vertical markers for each group that shares the color.\n\n Parameters\n ----------\n position: 2D array of float\n The position on the signal axis. 
Each row corresponds to a\n group.\n kwargs\n keywords argument for :py:class:`~.api.plot.markers.VerticalLine`\n \"\"\"\n colors = itertools.cycle(\n np.sort(plt.rcParams[\"axes.prop_cycle\"].by_key()[\"color\"])\n )\n\n for x, color in zip(position, colors):\n line = VerticalLines(offsets=x, color=color, **kwargs)\n self.add_marker(line, render_figure=False)\n if render_figure:\n self._render_figure(plot=[\"signal_plot\"])\n\n def add_xray_lines_markers(self, xray_lines, render_figure=True):\n \"\"\"\n Add marker on a spec.plot() with the name of the selected X-ray\n lines\n\n Parameters\n ----------\n xray_lines: list of string\n A valid list of X-ray lines\n \"\"\"\n if self._plot is None or not self._plot.is_active:\n raise RuntimeError(\"The signal needs to be plotted.\")\n norm = self._plot.signal_plot.ax_lines[0].norm\n minimum_intensity = self.data[self.data > 0].min() if norm == \"log\" else 0\n line_names = []\n segments = np.empty((len(xray_lines), 2, 2))\n offsets = np.empty((len(xray_lines), 2))\n # might want to set the intensity based on the alpha line intensity\n for i, xray_line in enumerate(xray_lines):\n element, line = utils_eds._get_element_and_line(xray_line)\n relative_factor = elements_db[element][\"Atomic_properties\"][\"Xray_lines\"][\n line\n ][\"weight\"]\n eng = self._get_line_energy(f\"{element}_{line}\")\n segments[i] = [[eng, 0], [eng, 1]]\n offsets[i] = [eng, 1]\n line_names.append(\n r\"$\\mathrm{%s}_{\\mathrm{%s}}$\"\n % utils_eds._get_element_and_line(xray_line)\n )\n\n line_markers = Lines(\n segments=segments,\n transform=\"relative\",\n color=\"black\",\n )\n text_markers = Texts(\n offsets=offsets,\n texts=line_names,\n offset_transform=\"relative\",\n rotation=np.pi / 2,\n horizontalalignment=\"left\",\n verticalalignment=\"bottom\",\n facecolor=\"black\",\n shift=0.005,\n )\n\n self.add_marker(line_markers, render_figure=False)\n self.add_marker(text_markers, render_figure=False)\n\n # Connect events to remove the markers when the line is closed\n line_markers.events.closed.connect(self._xray_marker_closed)\n text_markers.events.closed.connect(self._xray_marker_closed)\n self._xray_markers[\"lines\"] = line_markers\n self._xray_markers[\"texts\"] = text_markers\n self._xray_markers[\"names\"] = xray_lines\n\n if render_figure:\n self._render_figure(plot=[\"signal_plot\"])\n\n def _xray_marker_closed(self, obj):\n self._xray_markers = {}\n\n def remove_xray_lines_markers(self, xray_lines, render_figure=True):\n \"\"\"\n Remove marker previously added on a spec.plot() with the name of the\n selected X-ray lines\n\n Parameters\n ----------\n xray_lines: list of string\n A valid list of X-ray lines to remove\n render_figure: bool\n If True, render the figure after removing the markers\n \"\"\"\n ind = np.where(np.isin(self._xray_markers[\"names\"], xray_lines))\n self._xray_markers[\"lines\"].remove_items(ind)\n self._xray_markers[\"texts\"].remove_items(ind)\n self._xray_markers[\"names\"] = np.delete(self._xray_markers[\"names\"], ind)\n if render_figure:\n self._render_figure(plot=[\"signal_plot\"])\n\n def _add_background_windows_markers(self, windows_position, render_figure=True):\n \"\"\"\n Plot the background windows associated with each X-ray lines.\n\n For X-ray lines, a black line links the left and right window with the\n average value in each window.\n\n Parameters\n ----------\n windows_position: 2D array of float\n The position of the windows in energy. Each line corresponds to a\n X-ray lines. 
In a line, the two first value corresponds to the\n limit of the left window and the two last values corresponds to the\n limit of the right window.\n\n See also\n --------\n estimate_background_windows, get_lines_intensity\n \"\"\"\n self._add_vertical_lines_groups(windows_position)\n ax = self.axes_manager.signal_axes[0]\n segments = []\n for bw in windows_position:\n # TODO: test to prevent slicing bug. To be removed when fixed\n if ax.value2index(bw[0]) == ax.value2index(bw[1]):\n y1 = self.isig[bw[0]].data\n else:\n y1 = self.isig[bw[0] : bw[1]].mean(-1).data\n if ax.value2index(bw[2]) == ax.value2index(bw[3]):\n y2 = self.isig[bw[2]].data\n else:\n y2 = self.isig[bw[2] : bw[3]].mean(-1).data\n x1 = (bw[0] + bw[1]) / 2.0\n x2 = (bw[2] + bw[3]) / 2.0\n segments.append([[x1, y1[0]], [x2, y2[0]]])\n segments = np.array(segments)\n lines = Lines(segments=segments, color=\"black\")\n self.add_marker(lines, render_figure=False)\n if render_figure:\n self._render_figure(plot=[\"signal_plot\"])" }, { "identifier": "elements", "path": "exspy/misc/elements.py", "snippet": "" }, { "identifier": "utils", "path": "exspy/misc/eds/utils.py", "snippet": "_ABSORPTION_CORRECTION_DOCSTRING = \"\"\"absorption_correction : numpy.ndarray or None\n If None (default), absorption correction is ignored, otherwise, the\n array must contain values between 0 and 1 to correct the intensities\n based on estimated absorption.\n\"\"\"\n Z = elements_db[element][\"General_properties\"][\"Z\"]\n A = elements_db[element][\"General_properties\"][\"atomic_weight\"]\ndef _get_element_and_line(xray_line):\ndef _get_energy_xray_line(xray_line):\ndef _get_xray_lines_family(xray_line):\ndef _parse_only_lines(only_lines):\ndef get_xray_lines_near_energy(energy, width=0.2, only_lines=None):\ndef get_FWHM_at_Energy(energy_resolution_MnKa, E):\ndef xray_range(xray_line, beam_energy, density=\"auto\"):\ndef electron_range(element, beam_energy, density=\"auto\", tilt=0):\ndef take_off_angle(tilt_stage, azimuth_angle, elevation_angle, beta_tilt=0.0):\ndef xray_lines_model(\n elements,\n beam_energy=200,\n weight_percents=None,\n energy_resolution_MnKa=130,\n energy_axis=None,\n):\ndef quantification_cliff_lorimer(\n intensities, kfactors, absorption_correction=None, mask=None\n):\ndef _quantification_cliff_lorimer(\n intensities, kfactors, absorption_correction, ref_index=0, ref_index2=1\n):\ndef quantification_zeta_factor(intensities, zfactors, dose, absorption_correction=None):\ndef get_abs_corr_zeta(weight_percent, mass_thickness, take_off_angle):\ndef quantification_cross_section(\n intensities, cross_sections, dose, absorption_correction=None\n):\ndef get_abs_corr_cross_section(\n composition, number_of_atoms, take_off_angle, probe_area\n):\ndef edx_cross_section_to_zeta(cross_sections, elements):\ndef zeta_to_edx_cross_section(zfactors, elements):" } ]
import warnings import numpy as np import math import logging import hyperspy.components1d as create_component from hyperspy.misc.utils import stash_active_state from exspy.misc.eds.utils import _get_element_and_line from hyperspy.models.model1d import Model1D from exspy.signals.eds import EDSSpectrum from exspy.misc.elements import elements as elements_db from exspy.misc.eds import utils as utils_eds from hyperspy import utils
13,163
below. Parameters ---------- energy_resolution_MnKa : float Energy resolution of Mn Ka in eV E : float Energy of the peak in keV Returns ------- float : FWHM of the peak in keV Notes ----- This method implements the equation derived by Fiori and Newbury as is documented in the following: Fiori, C. E., and Newbury, D. E. (1978). In SEM/1978/I, SEM, Inc., AFM O'Hare, Illinois, p. 401. Goldstein et al. (2003). "Scanning Electron Microscopy & X-ray Microanalysis", Plenum, third edition, p 315. """ energy2sigma_factor = 2.5 / (eV2keV * (sigma2fwhm**2)) if return_f: return lambda sig_ref: math.sqrt( abs(energy2sigma_factor * (E - E_ref) * units_factor + np.power(sig_ref, 2)) ) else: return "sqrt(abs({} * ({} - {}) * {} + sig_ref ** 2))".format( energy2sigma_factor, E, E_ref, units_factor ) def _get_offset(diff): return "x + {}".format(diff) def _get_scale(E1, E_ref1, fact): return "{} + {} * (x - {})".format(E1, fact, E_ref1) class EDSModel(Model1D): """Build and fit a model of an EDS Signal1D. Parameters ---------- spectrum : EDSSpectrum (or any EDSSpectrum subclass) instance. auto_add_lines : bool If True, automatically add Gaussians for all X-rays generated in the energy range by an element, using the edsmodel.add_family_lines method. auto_background : bool If True, adds automatically a polynomial order 6 to the model, using the edsmodel.add_polynomial_background method. Any extra arguments are passed to the Model creator. Example ------- >>> m = s.create_model() >>> m.fit() >>> m.fit_background() >>> m.calibrate_energy_axis('resolution') >>> m.calibrate_xray_lines('energy', ['Au_Ma']) >>> m.calibrate_xray_lines('sub_weight',['Mn_La'], bound=10) """ def __init__( self, spectrum, auto_background=True, auto_add_lines=True, *args, **kwargs ): Model1D.__init__(self, spectrum, *args, **kwargs) self.xray_lines = list() self.family_lines = list() end_energy = self.axes_manager.signal_axes[0].high_value self.end_energy = min(end_energy, self.signal._get_beam_energy()) self.start_energy = self.axes_manager.signal_axes[0].low_value self.background_components = list() if "dictionary" in kwargs or len(args) > 1: auto_add_lines = False auto_background = False d = args[1] if len(args) > 1 else kwargs["dictionary"] if len(d["xray_lines"]) > 0: self.xray_lines.extend([self[name] for name in d["xray_lines"]]) if len(d["background_components"]) > 0: self.background_components.extend( [self[name] for name in d["background_components"]] ) if auto_background is True: self.add_polynomial_background() if auto_add_lines is True: # Will raise an error if no elements are specified, so check: if "Sample.elements" in self.signal.metadata: self.add_family_lines() def as_dictionary(self, fullcopy=True): dic = super(EDSModel, self).as_dictionary(fullcopy) dic["xray_lines"] = [c.name for c in self.xray_lines] dic["background_components"] = [c.name for c in self.background_components] return dic @property def units_factor(self): units_name = self.axes_manager.signal_axes[0].units if units_name == "eV": return 1000.0 elif units_name == "keV": return 1.0 else: raise ValueError("Energy units, %s, not supported" % str(units_name)) @property def spectrum(self): return self._signal @spectrum.setter def spectrum(self, value):
# -*- coding: utf-8 -*- # Copyright 2007-2023 The exSpy developers # # This file is part of exSpy. # # exSpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # exSpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with exSpy. If not, see <https://www.gnu.org/licenses/#GPL>. from __future__ import division _logger = logging.getLogger(__name__) eV2keV = 1000.0 sigma2fwhm = 2 * math.sqrt(2 * math.log(2)) def _get_weight(element, line, weight_line=None): if weight_line is None: weight_line = elements_db[element]["Atomic_properties"]["Xray_lines"][line][ "weight" ] return "x * {}".format(weight_line) def _get_sigma(E, E_ref, units_factor, return_f=False): """ Calculates an approximate sigma value, accounting for peak broadening due to the detector, for a peak at energy E given a known width at a reference energy. The factor 2.5 is a constant derived by Fiori & Newbury as references below. Parameters ---------- energy_resolution_MnKa : float Energy resolution of Mn Ka in eV E : float Energy of the peak in keV Returns ------- float : FWHM of the peak in keV Notes ----- This method implements the equation derived by Fiori and Newbury as is documented in the following: Fiori, C. E., and Newbury, D. E. (1978). In SEM/1978/I, SEM, Inc., AFM O'Hare, Illinois, p. 401. Goldstein et al. (2003). "Scanning Electron Microscopy & X-ray Microanalysis", Plenum, third edition, p 315. """ energy2sigma_factor = 2.5 / (eV2keV * (sigma2fwhm**2)) if return_f: return lambda sig_ref: math.sqrt( abs(energy2sigma_factor * (E - E_ref) * units_factor + np.power(sig_ref, 2)) ) else: return "sqrt(abs({} * ({} - {}) * {} + sig_ref ** 2))".format( energy2sigma_factor, E, E_ref, units_factor ) def _get_offset(diff): return "x + {}".format(diff) def _get_scale(E1, E_ref1, fact): return "{} + {} * (x - {})".format(E1, fact, E_ref1) class EDSModel(Model1D): """Build and fit a model of an EDS Signal1D. Parameters ---------- spectrum : EDSSpectrum (or any EDSSpectrum subclass) instance. auto_add_lines : bool If True, automatically add Gaussians for all X-rays generated in the energy range by an element, using the edsmodel.add_family_lines method. auto_background : bool If True, adds automatically a polynomial order 6 to the model, using the edsmodel.add_polynomial_background method. Any extra arguments are passed to the Model creator. 
Example ------- >>> m = s.create_model() >>> m.fit() >>> m.fit_background() >>> m.calibrate_energy_axis('resolution') >>> m.calibrate_xray_lines('energy', ['Au_Ma']) >>> m.calibrate_xray_lines('sub_weight',['Mn_La'], bound=10) """ def __init__( self, spectrum, auto_background=True, auto_add_lines=True, *args, **kwargs ): Model1D.__init__(self, spectrum, *args, **kwargs) self.xray_lines = list() self.family_lines = list() end_energy = self.axes_manager.signal_axes[0].high_value self.end_energy = min(end_energy, self.signal._get_beam_energy()) self.start_energy = self.axes_manager.signal_axes[0].low_value self.background_components = list() if "dictionary" in kwargs or len(args) > 1: auto_add_lines = False auto_background = False d = args[1] if len(args) > 1 else kwargs["dictionary"] if len(d["xray_lines"]) > 0: self.xray_lines.extend([self[name] for name in d["xray_lines"]]) if len(d["background_components"]) > 0: self.background_components.extend( [self[name] for name in d["background_components"]] ) if auto_background is True: self.add_polynomial_background() if auto_add_lines is True: # Will raise an error if no elements are specified, so check: if "Sample.elements" in self.signal.metadata: self.add_family_lines() def as_dictionary(self, fullcopy=True): dic = super(EDSModel, self).as_dictionary(fullcopy) dic["xray_lines"] = [c.name for c in self.xray_lines] dic["background_components"] = [c.name for c in self.background_components] return dic @property def units_factor(self): units_name = self.axes_manager.signal_axes[0].units if units_name == "eV": return 1000.0 elif units_name == "keV": return 1.0 else: raise ValueError("Energy units, %s, not supported" % str(units_name)) @property def spectrum(self): return self._signal @spectrum.setter def spectrum(self, value):
if isinstance(value, EDSSpectrum):
1
2023-10-28 20:04:10+00:00
16k
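The _get_sigma helper in the EDSModel code above encodes the Fiori & Newbury detector-broadening relation: the peak variance at energy E is the variance at a reference energy plus 2.5 / (eV2keV * sigma2fwhm**2) times the energy difference. Below is a minimal standalone sketch of that relation, assuming keV units (units_factor = 1); the helper name approx_sigma and the Mn Ka reference energy of 5.8987 keV are illustrative assumptions, while the Fe Ka energy of 6.4039 keV is taken from the record itself.

import math

# Constants as defined in the EDSModel snippet above.
eV2keV = 1000.0
sigma2fwhm = 2 * math.sqrt(2 * math.log(2))
energy2sigma_factor = 2.5 / (eV2keV * sigma2fwhm**2)

def approx_sigma(E, E_ref, sig_ref, units_factor=1.0):
    # Same expression that _get_sigma returns (as a string or lambda):
    # sigma(E)^2 = |energy2sigma_factor * (E - E_ref) * units_factor + sig_ref^2|
    return math.sqrt(abs(energy2sigma_factor * (E - E_ref) * units_factor + sig_ref ** 2))

# A 130 eV FWHM at Mn Ka corresponds to sig_ref = 0.130 / sigma2fwhm in keV;
# the peak at Fe Ka (6.4039 keV, as in the record) then comes out slightly broader.
sig_ref = 0.130 / sigma2fwhm
print(approx_sigma(6.4039, 5.8987, sig_ref))  # ~0.057 keV, vs. sig_ref ~0.055 keV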
swyoon/variationally-weighted-kernel-density-estimation
train.py
[ { "identifier": "find_optimal_bandwidth", "path": "KDE.py", "snippet": "def find_optimal_bandwidth(X, l_h, gpu=True, lik=True):\n l_lik = []\n for h in l_h:\n kde = KDE(h=h, gpu=gpu)\n kde.fit(X)\n p_loo = kde.p_loo()\n f_sq = kde.f_sq()\n if lik:\n lik = np.log(p_loo).mean()\n l_lik.append(lik)\n else:\n ise = - (f_sq - 2 * p_loo.mean())\n l_lik.append(ise)\n\n max_arg = np.argmax(l_lik)\n return l_h[max_arg]" }, { "identifier": "KernelRatioNaive", "path": "ratio.py", "snippet": "class KernelRatioNaive(KernelRatio):\n \"\"\"kernel density estimate plug-in estimator\"\"\"\n def __init__(self, h=0.6, gpu=False):\n self.h = h\n self.gpu = gpu \n\n def fit(self, X1, X2):\n xp = self.xp()\n self.N1_, self.D_ = X1.shape\n self.N2_, _ = X2.shape\n # self._separate_class(X, y)\n self.a1_ = xp.ones(self.N1_)\n self.a2_ = xp.ones(self.N2_)\n self.X1_ = xp.asarray(X1)\n self.X2_ = xp.asarray(X2)" }, { "identifier": "KernelRatioAlpha", "path": "ratio.py", "snippet": "class KernelRatioAlpha(KernelRatio):\n \"\"\"kernel density estimate plug-in estimator\"\"\"\n def __init__(self, h=0.6, gpu=True):\n self.h = h\n self.gpu = gpu \n\n def fit(self, X1, X2, a1, a2):\n xp = self.xp()\n self.N1_, self.D_ = X1.shape\n self.N2_, _ = X2.shape\n # self._separate_class(X, y)\n self.a1_ = xp.asarray(a1)\n self.a2_ = xp.asarray(a2)\n self.X1_ = xp.asarray(X1)\n self.X2_ = xp.asarray(X2)" }, { "identifier": "KernelRatioGaussian", "path": "ratio.py", "snippet": "class KernelRatioGaussian(KernelRatio):\n def __init__(self, h=0.6, s=0.5, gp_h=0.6, gp_l=0.1, reg=0.1, grid_sample=None, gpu=False, einsum_batch=200,\n kmeans=False, stabilize=False, solver='gp', para_h=0.6, para_l=0.1, online=False,\n trunc=None):\n self.h = h\n self.s = s\n self.gp_h = gp_h\n self.gp_l = gp_l\n self.reg = reg # gaussian covariance regularization parameter\n self.grid_sample = grid_sample # number of data point used in PDE solving\n self.gpu = gpu \n self.einsum_batch = einsum_batch\n self.kmeans = kmeans\n self.stabilize = stabilize\n self.solver = solver\n self.para_h = para_h\n self.para_l = para_l\n self.online = online # If true, kernel matrix is computed online. 
slower but saves memory.\n self.trunc = trunc \n\n def fit(self, X1, X2, true_model=None):\n xp = self.xp()\n self.X1_ = xp.asarray(X1)\n self.X2_ = xp.asarray(X2)\n if self.gpu and true_model is not None:\n l_true_model = []\n for tm in true_model:\n tm_ = copy.deepcopy(tm)\n tm_.switch_gpu()\n l_true_model.append(tm_)\n self.true_model = l_true_model\n else:\n self.true_model = true_model\n\n if self.kmeans:\n self.X1_cpu = X1\n self.X2_cpu = X2\n self.N1_, self.D_ = X1.shape\n self.N2_, _ = X2.shape\n if self.solver == 'gp':\n self._fit_weights()\n elif self.solver == 'para':\n self._fit_weights_para()\n elif self.solver == 'analytic':\n self._fit_weights_analytic()\n elif self.solver == 'analytic_v2':\n self._fit_weights_analytic_v2()\n elif self.solver == 'para_cls':\n self._fit_weights_para_cls()\n else:\n raise ValueError\n\n def _fit_gaussians(self):\n if self.true_model is not None:\n self.gaussians_ = self.true_model\n else:\n g1 = GaussianModel(reg=self.reg, gpu=self.gpu)\n g2 = GaussianModel(reg=self.reg, gpu=self.gpu)\n g1.fit(self.X1_, internal=True)\n g2.fit(self.X2_, internal=True)\n self.gaussians_ = [g1, g2]\n\n def _get_grid(self):\n \"\"\"return grid points on which the partial differential equation will be solved\"\"\"\n xp = self.xp()\n if self.grid_sample is None:\n # use all points\n X = xp.vstack([self.X1_, self.X2_])\n elif self.kmeans:\n from sklearn.cluster import KMeans\n if self.grid_sample >= self.N1_:\n grid1 = self.X1_cpu\n else:\n km = KMeans(n_clusters=self.grid_sample)\n km.fit(self.X1_cpu)\n grid1 = km.cluster_centers_\n\n if self.grid_sample >= self.N2_:\n grid2 = self.X2_cpu\n else:\n km = KMeans(n_clusters=self.grid_sample)\n km.fit(self.X2_cpu)\n grid2 = km.cluster_centers_\n\n X = np.vstack([grid1, grid2])\n X = xp.asarray(X)\n else:\n idx1 = xp.arange(self.N1_)\n idx2 = xp.arange(self.N2_)\n xp.random.shuffle(idx1)\n xp.random.shuffle(idx2)\n idx1 = idx1[:int(self.grid_sample)]\n idx2 = idx2[:int(self.grid_sample)]\n X = xp.vstack([self.X1_[idx1], self.X2_[idx2]])\n self.grid_ = X\n X = xp.asarray(X)\n return X\n\n def _get_deriv(self, X=None):\n \"\"\"compute density derivatives\"\"\"\n xp = self.xp()\n self._fit_gaussians()\n g1, g2 = self.gaussians_\n\n if X is None:\n X = self.grid_\n du = g1.grad_over_p(X) - g2.grad_over_p(X) # (N, D)\n v = g1.lap_over_p(X) - g2.lap_over_p(X)\n if self.solver in {'para', 'para_cls'} :\n return du, v\n Hu = g1.grad_grad_over_p(X) - g2.grad_grad_over_p(X) # (N, D, D)\n # du_Hu = xp.einsum('ij,ijk->ik', du, Hu) # todo\n du_Hu = (du.reshape(du.shape + (1,)) * Hu).sum(axis=1)\n Lu = g1.tr_grad_grad_over_p(X) - g2.tr_grad_grad_over_p(X)\n dv = g1.grad_lap_over_p(X) - g2.grad_lap_over_p(X)\n return du, v, Hu, du_Hu, Lu, dv\n\n def _fit_weights(self):\n import time\n time_1 = time.time()\n xp = self.xp()\n D = self.D_\n\n # sample points\n X = self._get_grid()\n\n # build operators\n time_2 = time.time()\n self.gp_ = GPDifferentialOperator(X, h=self.gp_h, l=self.gp_l, gpu=self.gpu)\n G = self.gp_.gradient()\n H, L = self.gp_.hessian(vec=True, lap=True)\n\n # compute coefficients\n time_3 = time.time()\n du, v, Hu, du_Hu, Lu, dv = self._get_deriv()\n\n # solve equation\n time_4 = time.time()\n W = self._get_cross_term_double_vec(D)\n U = xp.einsum('ij,ik->ijk', du, du)\n triu_idx = np.triu_indices(D)\n dudu_vec = U[:, triu_idx[0], triu_idx[1]]\n\n # einsum batches\n if isinstance(self.einsum_batch, int):\n n_grid = X.shape[0]\n n_batch = int(np.ceil(n_grid / self.einsum_batch))\n l_A = []\n l_b = []\n # time_a = 
time.time()\n for i_batch in range(n_batch):\n start = i_batch * self.einsum_batch\n if i_batch == (n_batch - 1):\n end = n_grid\n else:\n end = (i_batch + 1) * self.einsum_batch\n dudu_vec_ = dudu_vec[start:end]\n H_ = H[start:end]\n du_Hu_ = du_Hu[start:end]\n G_ = G[start:end]\n du_ = du[start:end]\n dv_ = dv[start:end]\n v_ = v[start:end]\n Lu_ = Lu[start:end]\n L_ = L[start:end]\n term_1_ = xp.einsum('ij,j->ij', dudu_vec_, W)\n term_1 = xp.einsum('ij,ijk->ik', term_1_, H_)\n term_2 = xp.einsum('ij,ijk->ik', du_Hu_, G_)\n term_3 = Lu_ * xp.einsum('ij,ijk->ik', du_, G_)\n term_4 = xp.einsum('ij,ij->i', dv_, du_)\n term_5 = v_ * Lu_.flatten()\n\n A_ = 2 * (term_1 + term_2 + term_3) + self.s * L_\n b_ = - (term_4 + term_5)\n l_A.append(A_)\n l_b.append(b_)\n # time_b = time.time()\n # print(time_b - time_a)\n A = xp.concatenate(l_A)\n b = xp.concatenate(l_b)\n # time_c = time.time()\n # print(time_c - time_b)\n\n elif self.einsum_batch == 'for':\n \"\"\"slower than einsum batch. einsum batch is slower when the batch size is small.\"\"\"\n l_A = []\n l_b = []\n n_grid = X.shape[0]\n W = xp.asarray(W)\n # time_a = time.time()\n for i in range(n_grid):\n dudu_vec_ = dudu_vec[i]\n H_ = H[i]\n du_Hu_ = du_Hu[i]\n G_ = G[i]\n du_ = du[i]\n dv_ = dv[i]\n v_ = v[i]\n Lu_ = Lu[i]\n L_ = L[i]\n\n term_1 = (dudu_vec_ * W).dot(H_)\n term_2 = du_Hu_.dot(G_)\n term_3 = Lu_ * du_.dot(G_)\n term_4 = dv_.dot(du_)\n term_5 = v_ * Lu_\n # print(term_1.shape, term_2.shape, term_3.shape, term_4.shape, term_5.shape)\n A_ = 2 * (term_1 + term_2 + term_3) + self.s * L_\n b_ = - (term_4 + term_5)\n\n l_A.append(A_)\n l_b.append(b_)\n\n # time_b = time.time()\n # print(time_b - time_a)\n A = xp.stack(l_A)\n b = xp.concatenate(l_b)\n # time_c = time.time()\n # print(time_c - time_b)\n\n else:\n term_1 = xp.einsum('ij,j,ijk->ik', dudu_vec, W, H)\n term_2 = xp.einsum('ij,ijk->ik', du_Hu, G)\n term_3 = Lu * xp.einsum('ij,ijk->ik', du, G)\n term_4 = xp.einsum('ij,ij->i', dv, du)\n term_5 = v * Lu.flatten()\n\n A = 2 * (term_1 + term_2 + term_3) + self.s * L\n b = - (term_4 + term_5)\n # print(v.shape, Lu.shape)\n assert A.shape == (X.shape[0], X.shape[0])\n assert b.shape == (X.shape[0],)\n\n # sol = sp.linalg.cho_solve(sp.linalg.cho_factor(A), b)\n sol = xp.linalg.inv(A).dot(b)\n\n self.G = G\n self.du = du\n self.v = v\n\n if self.stabilize:\n stable_max = -0.1\n shift = stable_max - sol.max()\n # print(sol.max(), shift)\n sol += shift\n # print(sol.max())\n\n # infer whole weights\n time_4 = time.time()\n if self.grid_sample is None:\n self.a1_ = xp.exp(sol[:self.N1_])\n self.a2_ = xp.exp(sol[self.N1_:])\n self.sol_ = sol\n else:\n sol1 = self.gp_.predict(sol, self.X1_)\n sol2 = self.gp_.predict(sol, self.X2_)\n self.a1_ = xp.exp(sol1)\n self.a2_ = xp.exp(sol2)\n self.sol_ = sol\n\n def get_diff_dist_K(self, basis, data, h=None):\n D = self.D_\n xp = self.xp()\n Xb = data\n if h is None:\n h = self.para_h\n Xi = Xb.reshape((1, data.shape[0], D))\n basis_ = basis.reshape((basis.shape[0], 1, D))\n diff = (Xi - basis_) # (MxBxD)\n dist = (diff ** 2).sum(axis=2) # (MxB,)\n if self.trunc is None:\n K = xp.exp(- dist / h ** 2 / 2) # (Mx B)\n else:\n K = xp.exp(- dist / h ** 2 / 2) # (Mx B)\n K[K<=np.exp(-0.5*self.trunc**2)] = 0\n return diff, dist, K\n\n def _fit_weights_para(self):\n # import time\n # mempool = cp.get_default_memory_pool()\n # print(mempool.used_bytes() / 1024 / 1024, mempool.total_bytes() / 1024 / 1024)\n # time_1 = time.time()\n xp = self.xp()\n D = self.D_\n X = xp.vstack([self.X1_, self.X2_])\n # X = 
xp.vstack([self.X1_])\n\n # sample points\n basis = self._get_grid()\n self.basis = basis\n\n # compute coefficients\n # time_3 = time.time()\n # print(mempool.used_bytes() / 1024 / 1024, mempool.total_bytes() / 1024 / 1024)\n du, v = self._get_deriv(X=X)\n # print(' deriv {:.4f}sec'.format(time_3 - time_1))\n # print(mempool.used_bytes() / 1024 / 1024, mempool.total_bytes() / 1024 / 1024)\n\n # compute kernel matrix\n # dist = self._cdist(basis, X)\n # diff = X.reshape((1, X.shape[0], X.shape[1])) - basis.reshape((basis.shape[0], 1, basis.shape[1]))\n # h = 0.8\n # K = np.exp(- dist / h**2 / 2)\n # dK = diff * K.reshape(K.shape + (1,))\n if not self.online:\n diff_, dist_, K_ = self.get_diff_dist_K(basis, X)\n\n # print('dist compt {:.4f}sec'.format(time.time() - time_1))\n # print(mempool.used_bytes() / 1024 / 1024)\n # einsum batches\n if isinstance(self.einsum_batch, int):\n batch_size = self.einsum_batch\n n_batch = int(np.ceil(X.shape[0] / batch_size))\n A = xp.zeros((basis.shape[0], basis.shape[0])) # M x M\n b = xp.zeros((basis.shape[0],))\n # C = np.zeros((basis.shape[0], basis.shape[0])) # M x M\n l_K = []\n\n basis_ = basis.reshape((basis.shape[0], 1, basis.shape[1]))\n for i_b in range(n_batch):\n b_s = i_b * batch_size\n b_e = min((i_b + 1) * batch_size, X.shape[0])\n B = b_e - b_s\n Xb = X[b_s:b_e]\n dub = du[b_s:b_e] # B x D\n vb = v[b_s:b_e]\n\n # kernel computation\n # Xi = Xb.reshape((1, B, D))\n # diff = (Xi - basis_) # (MxBxD)\n # dist = (diff ** 2).sum(axis=2) # (MxB,)\n # Ki = xp.exp(- dist / self.para_h ** 2 / 2) # (Mx B)\n if self.online:\n diff, dist, Ki = self.get_diff_dist_K(basis, Xb)\n else:\n diff = diff_[:, b_s:b_e, :]\n dist = dist_[:, b_s:b_e]\n Ki = K_[:, b_s:b_e]\n\n # dk\n dk = - Ki.reshape(Ki.shape + (1,)) * diff / self.para_h ** 2 # M x B x D\n dudk = (dub.reshape((1, B, D)) * dk).sum(axis=2) # MxB\n A += dudk.dot(dudk.T) # M x M\n b += (vb * dudk).sum(axis=1) # M\n # C += dk.T.dot(dk)\n l_K.append(Ki)\n\n A /= X.shape[0]\n b /= X.shape[0]\n # C /= X.shape[0]\n A *= 2\n K = xp.concatenate(l_K, axis=1)\n\n elif self.einsum_batch == 'for':\n \"\"\"slower than einsum batch. 
einsum batch is slower when the batch size is small.\"\"\"\n A = xp.zeros((basis.shape[0], basis.shape[0])) # M x M\n b = xp.zeros((basis.shape[0],))\n # C = np.zeros((basis.shape[0], basis.shape[0])) # M x M\n l_K = []\n\n basis_ = basis.reshape((basis.shape[0], 1, basis.shape[1]))\n for i in range(X.shape[0]):\n # kernel computation\n Xi = X[i].reshape((1, 1, X.shape[1]))\n diff = (Xi - basis_).sum(axis=1) # (MxD)\n dist = (diff ** 2).sum(axis=1) # (M,)\n Ki = xp.exp(- dist / self.para_h ** 2 / 2) # (M, )\n\n # dk\n dk = - Ki * diff.T / self.para_h ** 2 # D x M\n dudk = du[i].dot(dk) # M\n A += xp.outer(dudk, dudk)\n b += v[i] * dudk\n # C += dk.T.dot(dk)\n l_K.append(Ki)\n\n A /= X.shape[0]\n b /= X.shape[0]\n # C /= X.shape[0]\n A *= 2\n K = xp.stack(l_K, axis=1)\n else:\n raise ValueError\n assert A.shape == (basis.shape[0], basis.shape[0])\n assert b.shape == (basis.shape[0],)\n # time_4 = time.time()\n # print('{:.4f}sec'.format(time_4 - time_1))\n\n sol_w = - xp.linalg.inv(A + self.para_l * xp.eye(basis.shape[0])).dot(b)\n self.sol_w = sol_w\n # K2 = xp.exp(- self._cdist(basis, self.X2_) / self.para_h ** 2 / 2)\n # K = xp.hstack([K, K2])\n sol = sol_w.dot(K)\n self.sol_ = sol\n # time_5 = time.time()\n # print('{:.4f}sec'.format(time_5 - time_1))\n\n if self.stabilize:\n stable_max = -0.1\n shift = stable_max - sol.max()\n sol += shift\n\n # infer whole weights\n self.a1_ = xp.exp(sol[:self.N1_])\n self.a2_ = xp.exp(sol[self.N1_:])\n self.sol_ = sol\n # time_6 = time.time()\n # print('{:.4f}sec'.format(time_6 - time_1))\n\n def _fit_weights_para_cls(self):\n xp = self.xp()\n D = self.D_\n X = xp.vstack([self.X1_, self.X2_])\n\n # sample points\n basis = self._get_grid()\n self.basis = basis\n\n # compute coefficients\n du, v = self._get_deriv(X=X)\n g1, g2 = self.gaussians_\n\n # compute kernel matrix\n if not self.online:\n diff_, dist_, K_ = self.get_diff_dist_K(basis, X)\n\n # einsum batches\n if isinstance(self.einsum_batch, int):\n batch_size = self.einsum_batch\n n_batch = int(np.ceil(X.shape[0] / batch_size))\n A = xp.zeros((basis.shape[0], basis.shape[0])) # M x M\n b = xp.zeros((basis.shape[0],))\n l_K = []\n\n basis_ = basis.reshape((basis.shape[0], 1, basis.shape[1]))\n for i_b in range(n_batch):\n b_s = i_b * batch_size\n b_e = min((i_b + 1) * batch_size, X.shape[0])\n B = b_e - b_s\n Xb = X[b_s:b_e]\n dub = du[b_s:b_e] # B x D\n vb = v[b_s:b_e]\n\n # kernel computation\n if self.online:\n diff, dist, Ki = self.get_diff_dist_K(basis, Xb)\n else:\n diff = diff_[:, b_s:b_e, :]\n dist = dist_[:, b_s:b_e]\n Ki = K_[:, b_s:b_e]\n\n # prob\n p1 = g1.predict(Xb, internal=True)\n p2 = g2.predict(Xb, internal=True)\n coef = (p1 * p2 / (p1 + p2) ** 2)[None,:] # 1xB\n \n\n # dk\n dk = - Ki.reshape(Ki.shape + (1,)) * diff / self.para_h ** 2 # M x B x D\n dudk = (dub.reshape((1, B, D)) * dk).sum(axis=2) # MxB\n A += (dudk * coef).dot(dudk.T) # M x M\n b += (vb * dudk * coef).sum(axis=1) # M\n l_K.append(Ki)\n\n A /= X.shape[0]\n b /= X.shape[0]\n A *= 2\n K = xp.concatenate(l_K, axis=1)\n else:\n raise ValueError\n assert A.shape == (basis.shape[0], basis.shape[0])\n assert b.shape == (basis.shape[0],)\n\n sol_w = - xp.linalg.inv(A + self.para_l * xp.eye(basis.shape[0])).dot(b)\n self.sol_w = sol_w\n sol = sol_w.dot(K)\n self.sol_ = sol\n\n if self.stabilize:\n stable_max = -0.1\n shift = stable_max - sol.max()\n sol += shift\n\n # infer whole weights\n self.a1_ = xp.exp(sol[:self.N1_])\n self.a2_ = xp.exp(sol[self.N1_:])\n self.sol_ = sol\n\n\n def compute_alpha(self, new_X):\n xp 
= self.xp()\n if self.solver == 'para':\n dist = cdist(cp.asnumpy(self.basis), cp.asnumpy(new_X), metric='sqeuclidean')\n K = np.exp(- dist / self.para_h ** 2 / 2)\n beta = cp.asnumpy(self.sol_w).dot(K)\n return cp.asnumpy(np.exp(beta))\n else:\n return None\n\n def _fit_weights_analytic(self):\n xp = self.xp()\n # fit gaussians\n self._fit_gaussians()\n # compute pooled covariance and mean\n g1, g2 = self.gaussians_\n self.pooled_S = (g1.sig_ * len(self.X1_) + g2.sig_ * len(self.X2_)) / (len(self.X1_) + len(self.X2_))\n self.pooled_mu = (g1.mu_ + g2.mu_) / 2\n\n # compute analytic beta\n S_inv = xp.linalg.inv(self.pooled_S)\n xx1 = self.X1_ - self.pooled_mu\n xx2 = self.X2_ - self.pooled_mu\n beta1 = (xx1 * S_inv.dot(xx1.T).T).sum(axis=1) * 0.5\n beta2 = (xx2 * S_inv.dot(xx2.T).T).sum(axis=1) * 0.5\n\n # compute alpha\n self.a1_ = xp.exp(beta1)\n self.a2_ = xp.exp(beta2)\n\n def _fit_weights_analytic_v2(self):\n \"\"\"heteoscedastic gaussian assumption\"\"\"\n xp = self.xp()\n # fit gaussians\n self._fit_gaussians()\n # compute pooled covariance and mean\n g1, g2 = self.gaussians_\n\n # compute analytic beta\n S1inv = xp.linalg.inv(g1.sig_)\n S2inv = xp.linalg.inv(g2.sig_)\n pooled_S = (g1.sig_ + g2.sig_) / 2\n pooled_Sinv = xp.linalg.inv(pooled_S)\n mu1S1inv = g1.mu_.dot(S1inv)\n mu2S2inv = g2.mu_.dot(S2inv)\n b = (mu1S1inv + mu2S2inv) * 0.5\n\n xx1 = self.X1_\n xx2 = self.X2_\n beta1 = (xx1 * pooled_Sinv.dot(xx1.T).T).sum(axis=1) * 0.5 + (b * xx1).sum(axis=1)\n beta2 = (xx2 * pooled_Sinv.dot(xx2.T).T).sum(axis=1) * 0.5 + (b * xx2).sum(axis=1)\n\n # compute alpha\n self.a1_ = xp.exp(beta1)\n self.a2_ = xp.exp(beta2)\n\n def objective(self):\n w = cp.asnumpy(self.sol_w)\n A = self.A\n b = self.b\n obj = w.dot(A.dot(w)) + 2 * w.dot(b)\n reg = w.dot(w) * self.para_l\n return cp.asnumpy(obj), reg\n\n def run_kl_batch(self, X1, X2s, batch=3):\n \"\"\"compute KL divergence for a number of distributions simultaneously \"\"\"\n xp = self.xp()\n X1_ = xp.asarray(X1)\n # X2s_ = xp.asarray(X2s) do not transfer to GPU\n self.N1_, self.D_ = X1.shape\n self.M2_, self.N2_, _ = X2s.shape\n assert self.solver == 'para'\n n_batch = int(np.ceil(self.M2_ / batch))\n D = self.D_\n n_basis = self.N1_ + self.N2_\n l_kl = []\n\n # fit gaussians\n g1 = GaussianModel(reg=self.reg, gpu=self.gpu)\n g1.fit(X1_, internal=True)\n g1_grad_over_p = g1.grad_over_p(X1_)\n g1_lap_over_p = g1.lap_over_p(X1_)\n\n diff11, dist11, K11 = self.get_diff_dist_K(X1_, X1_, h=1)\n K11_p = K11 ** (1 / self.para_h**2)\n dk1 = - K11_p.reshape(K11_p.shape + (1,)) * diff11 / self.para_h ** 2 # M x B x D\n from tqdm import tqdm\n for b in tqdm(range(n_batch)):\n b_s = b * batch\n b_e = (b + 1) * batch if b != (n_batch - 1) else len(X2s)\n X2s_ = xp.asarray(X2s[b_s:b_e])\n diff21, dist21, K21 = self.get_diff_dist_K(X2s_.reshape(((b_e - b_s) * self.N2_, D)), X1_, h=1)\n # diff : X1 - X2\n\n l_g2 = []\n for i_x2, x2_ in enumerate(X2s_):\n x2_s = i_x2 * self.N2_ # start index\n x2_e = (i_x2 + 1) * self.N2_ # end index\n diff22, dist22, K22 = self.get_diff_dist_K(x2_, x2_, h=1)\n diff21_ = diff21[x2_s:x2_e] # X2 - X1\n dist21_ = dist21[x2_s:x2_e]\n K21_ = K21[x2_s:x2_e]\n\n g2 = GaussianModel(reg=self.reg, gpu=self.gpu)\n g2.fit(x2_, internal=True)\n l_g2.append(g2)\n\n # compute derivatives\n du = g1_grad_over_p - g2.grad_over_p(X1_) # (N, D)\n v = g1_lap_over_p - g2.lap_over_p(X1_)\n\n # diff = xp.concatenate([diff11, diff21_], axis=0) # (basis:N1+N2) x (N1) x (D)\n # Ki = xp.vstack([K11, K21_]) ** (1 / self.para_h ** 2) # \n Ki = K21_ ** (1 
/ self.para_h ** 2) # \n dub = du\n vb = v\n\n dk = - Ki.reshape(Ki.shape + (1,)) * diff21_ / self.para_h ** 2 # M x B x D\n dk = xp.vstack([dk1, dk])\n dudk = (dub.reshape((1, self.N1_, D)) * dk).sum(axis=2) # MxB\n A = 2 * dudk.dot(dudk.T) / self.N1_ # M x M\n b = (vb * dudk).sum(axis=1) / self.N1_ # M\n sol_w = - xp.linalg.inv(A + self.para_l * xp.eye(n_basis)).dot(b)\n K_rest = xp.vstack([K21_.T, K22]) ** (1 / self.para_h ** 2)\n Ki = xp.vstack([K11_p, Ki])\n K_whole = xp.hstack([Ki, K_rest])\n a = xp.exp(sol_w.dot(K_whole))\n a1 = a[:self.N1_]\n a2 = a[self.N1_:]\n\n # KL divergence computation\n K11_h = K11 ** (1 / self.h**2)\n diag_zero_K11_h = K11_h - xp.diag(xp.diag(K11_h))\n K21_h = K21_ ** (1 / self.h ** 2)\n f1 = a1.dot(diag_zero_K11_h) / (self.N1_ - 1)\n log_p1_loo = xp.log(f1)\n log_p2 = xp.log(a2.dot(K21_h) / (self.N2_))\n kl = cp.asnumpy((log_p1_loo - log_p2))\n kl = np.nanmean(kl)\n l_kl.append(kl)\n return np.array(l_kl)" }, { "identifier": "Score_network", "path": "model/energy.py", "snippet": "class Score_network(nn.Module):\n def __init__(\n self,\n input_dim,\n units,\n SiLU=True,\n dropout=True\n ):\n super().__init__()\n layers = []\n in_dim = input_dim\n for out_dim in units:\n layers.extend([\n nn.Linear(in_dim, out_dim),\n nn.SiLU() if SiLU else nn.ReLU(),\n nn.Dropout(.7) if dropout else nn.Identity()\n ])\n in_dim = out_dim\n layers.append(nn.Linear(in_dim, 1))\n\n self.net = nn.Sequential(*layers)\n \n def forward(self, x):\n return self.net(x)" }, { "identifier": "Weight_network", "path": "model/energy.py", "snippet": "class Weight_network(nn.Module):\n def __init__(\n self,\n input_dim,\n units,\n SiLU=True,\n dropout=True,\n ):\n super().__init__()\n layers = []\n in_dim = input_dim\n for out_dim in units[:-1]:\n layers.extend([\n nn.Linear(in_dim, out_dim),\n nn.SiLU() if SiLU else nn.ReLU(),\n nn.Dropout(.7) if dropout else nn.Identity()\n ])\n in_dim = out_dim\n\n layers.extend([\n nn.Linear(in_dim, units[-1]),\n nn.Sigmoid(),\n nn.Dropout(.5) if dropout else nn.Identity(),\n nn.Linear(units[-1], 1)\n ])\n\n self.net = nn.Sequential(*layers)\n\n def forward(self, x):\n return self.net(x)" }, { "identifier": "Energy", "path": "model/energy.py", "snippet": "class Energy(nn.Module):\n def __init__(self, net):\n super().__init__()\n self.net = net\n\n def forward(self, x):\n return self.net(x)\n\n def score(self, x, sigma=None):\n x = x.requires_grad_()\n logp = -self.net(x).sum()\n return torch.autograd.grad(logp, x, create_graph=True)[0]\n \n def minus_forward(self, x):\n return - self.net(x)\n\n def load(self, path):\n self.load_state_dict(torch.load(path))\n return self" }, { "identifier": "Laplacian", "path": "loss/bias.py", "snippet": "class Laplacian():\n def __init__(self, model):\n self.model = model\n \n def get_sum_of_gradients_log_p(self, x):\n log_p = self.model.minus_forward(x)\n log_p_gradient = torch.autograd.grad(\n outputs=log_p, inputs=x,\n grad_outputs=torch.ones_like(log_p),\n create_graph=True, only_inputs=True\n )[0]\n \n return log_p_gradient.sum(0)\n\n def get_laplacian(self, x):\n return jacobian(self.get_sum_of_gradients_log_p, x).swapaxes(0, 1).diagonal(dim1=-2, dim2=-1).sum(-1)" }, { "identifier": "sliced_VR_score_matching", "path": "loss/sliced_score_matching.py", "snippet": "def sliced_VR_score_matching(energy_net, samples, noise=None, detach=False, noise_type='gaussian'):\n \"\"\" Sliced score matching loss from:\n https://github.com/ermongroup/sliced_score_matching/\n \"\"\"\n samples.requires_grad_(True)\n if noise is 
None:\n vectors = torch.randn_like(samples)\n if noise_type == 'radermacher':\n vectors = vectors.sign()\n elif noise_type == 'gaussian':\n pass\n else:\n raise ValueError(\"Noise type not implemented\")\n else:\n vectors = noise\n\n logp = -energy_net(samples).sum()\n grad1 = torch.autograd.grad(logp, samples, create_graph=True)[0]\n gradv = torch.sum(grad1 * vectors)\n loss1 = torch.norm(grad1, dim=-1) ** 2 * 0.5\n if detach:\n loss1 = loss1.detach()\n grad2 = torch.autograd.grad(gradv, samples, create_graph=True)[0]\n loss2 = torch.sum(vectors * grad2, dim=-1)\n if detach:\n loss2 = loss2.detach()\n\n loss = (loss1 + loss2).mean()\n return loss" } ]
import torch import argparse import numpy as np from KDE import find_optimal_bandwidth from ratio import KernelRatioNaive, KernelRatioAlpha, KernelRatioGaussian from model.energy import Score_network, Weight_network, Energy from loss.bias import Laplacian from loss.sliced_score_matching import sliced_VR_score_matching from scipy.spatial.distance import pdist
10,813
data1 = torch.randn(args.num_data, args.dim) @ L.T + mean1.astype(np.float32) L = torch.linalg.cholesky(torch.tensor(Cov2.astype(np.float32))) data2 = torch.randn(args.num_data, args.dim) @ L.T + mean2.astype(np.float32) TKL = (np.trace(np.linalg.inv(Cov2) @ Cov1) + (mean2-mean1).T @ np.linalg.inv(Cov2) @ (mean2-mean1) - args.dim + np.log(np.linalg.det(Cov2)/np.linalg.det(Cov1)))/2 print(f"True KL divergence: {TKL}") data1_set = torch.utils.data.TensorDataset(data1) data2_set = torch.utils.data.TensorDataset(data2) total_set = torch.utils.data.TensorDataset(torch.cat([data1, data2])) data1_loader = torch.utils.data.DataLoader(data1_set, batch_size=args.batch_size, shuffle=True) data2_loader = torch.utils.data.DataLoader(data2_set, batch_size=args.batch_size, shuffle=True) total_loader = torch.utils.data.DataLoader(total_set, batch_size=args.batch_size, shuffle=True) l_h = np.linspace(0.2, 1., 20) if args.model == "KDE": opt_h1 = find_optimal_bandwidth(data1, l_h, lik=False, gpu=gpu) opt_h2 = find_optimal_bandwidth(data2, l_h, lik=False, gpu=gpu) opt_h = (opt_h1 + opt_h2) / 2 model = KernelRatioNaive(h=opt_h, gpu=gpu) model.eps = 1e-100 model.fit(data1, data2) print(f"KL divergence calculated by KDE: {model.kl()}") elif args.model == "based": opt_h1 = find_optimal_bandwidth(data1[:int(len(data1)/4)], l_h, gpu=gpu) opt_h2 = find_optimal_bandwidth(data2[:int(len(data2)/4)], l_h, gpu=gpu) opt_h = (opt_h1 + opt_h2) / 2 med_dist1 = np.median(pdist(data1)) med_dist2 = np.median(pdist(data2)) med_dist = (med_dist1 + med_dist2) / 2 model = KernelRatioGaussian(grid_sample=3000, solver='para', para_h=med_dist, para_l=0.1, h=opt_h, gpu=gpu, kmeans=False, einsum_batch=100, reg=0.1, stabilize=True, online=True) model.eps = 1e-100 model.fit(data1, data2) print(f"KL divergence calculated by VWKDE model based: {model.kl()}") elif args.model == "free": score_model_p1 = Energy(net=Score_network(input_dim=args.dim, units=[300,300], dropout=True)).to(args.device) score_model_p2 = Energy(net=Score_network(input_dim=args.dim, units=[300,300], dropout=True)).to(args.device) optimizer_sp1 = torch.optim.Adam(score_model_p1.parameters(), lr=1e-4) optimizer_sp2 = torch.optim.Adam(score_model_p2.parameters(), lr=1e-4) print("Train score models for p1 and p2") for epoch in range(args.score_epoch): loss1 = 0 loss2 = 0 for x in data1_loader: x = x[0].to(args.device) loss = sliced_VR_score_matching(score_model_p1, x) optimizer_sp1.zero_grad() loss.backward() optimizer_sp1.step() loss1 += loss.item() / len(data1_loader) for x in data2_loader: x = x[0].to(args.device) loss = sliced_VR_score_matching(score_model_p2, x) optimizer_sp2.zero_grad() loss.backward() optimizer_sp2.step() loss2 += loss.item() / len(data2_loader) if epoch % 100 == 99: print(f"Epoch: {epoch+1} | Loss1: {loss1}") print(f"Epoch: {epoch+1} | Loss2: {loss2}") score_model_p1.eval() score_model_p2.eval() p1_laplacian = Laplacian(score_model_p1) p2_laplacian = Laplacian(score_model_p2) weight_model = Energy(net=Weight_network(input_dim=args.dim, units=[128,128,128,64], dropout=False)).to(args.device) optimizer_w = torch.optim.Adam(weight_model.parameters(), lr=1e-3) print("Train a weight model") for epoch in range(args.weight_epoch): total_loss = 0 for x in total_loader: x = x[0].to(args.device).requires_grad_() output = weight_model(x) output_gradient = torch.autograd.grad( outputs=output, inputs=x, grad_outputs=torch.ones_like(output), create_graph=True, only_inputs=True )[0] log_p1 = score_model_p1.minus_forward(x) grad_logp1 = torch.autograd.grad( 
outputs=log_p1.view(-1, 1), inputs=x, grad_outputs=torch.ones_like(output), create_graph=True, only_inputs=True )[0] log_p2 = score_model_p2.minus_forward(x) grad_logp2 = torch.autograd.grad( outputs=log_p2.view(-1, 1), inputs=x, grad_outputs=torch.ones_like(output), create_graph=True, only_inputs=True )[0] lp_p1 = p1_laplacian.get_laplacian(x) - (grad_logp1**2).sum(1) lp_p2 = p2_laplacian.get_laplacian(x) - (grad_logp2**2).sum(1) loss = ((((output_gradient)*(-grad_logp1+grad_logp2)).sum(1) + 0.5*(lp_p1-lp_p2))**2).mean() optimizer_w.zero_grad() loss.backward() optimizer_w.step() total_loss += loss.item() / len(total_loader) if epoch % 100 == 99: print(f"Epoch: {epoch+1} | Loss: {total_loss}") weight_model.eval() opt_h1 = find_optimal_bandwidth(data1[:int(len(data1)/4)], l_h, gpu=gpu) opt_h2 = find_optimal_bandwidth(data2[:int(len(data2)/4)], l_h, gpu=gpu) opt_h = (opt_h1 + opt_h2) / 2 logWeights1 = weight_model(data1.to(args.device)).detach().cpu().numpy() logWeights2 = weight_model(data2.to(args.device)).detach().cpu().numpy()
parser = argparse.ArgumentParser() parser.add_argument('--device', type=str, default='cuda') parser.add_argument('--model', type=str, default='KDE') parser.add_argument('--dim', type=int, default=20) parser.add_argument('--score_epoch', type=int, default=500) parser.add_argument('--weight_epoch', type=int, default=200) parser.add_argument('--batch_size', type=int, default=1024) parser.add_argument('--num_data', type=int, default=1024) args = parser.parse_args() if args.device == 'cuda': gpu=True else: gpu=False mean1 = np.concatenate([np.array([0]), np.zeros((args.dim-1,))]) Cov1 = np.eye(args.dim)*np.concatenate([np.array([1.]), np.ones((args.dim-1,))]) mean2 = np.concatenate([np.sqrt([2]), np.zeros((args.dim-1,))]) Cov2 = np.eye(args.dim)*np.concatenate([np.array([1.]), np.ones((args.dim-1,))]) L = torch.linalg.cholesky(torch.tensor(Cov1.astype(np.float32))) data1 = torch.randn(args.num_data, args.dim) @ L.T + mean1.astype(np.float32) L = torch.linalg.cholesky(torch.tensor(Cov2.astype(np.float32))) data2 = torch.randn(args.num_data, args.dim) @ L.T + mean2.astype(np.float32) TKL = (np.trace(np.linalg.inv(Cov2) @ Cov1) + (mean2-mean1).T @ np.linalg.inv(Cov2) @ (mean2-mean1) - args.dim + np.log(np.linalg.det(Cov2)/np.linalg.det(Cov1)))/2 print(f"True KL divergence: {TKL}") data1_set = torch.utils.data.TensorDataset(data1) data2_set = torch.utils.data.TensorDataset(data2) total_set = torch.utils.data.TensorDataset(torch.cat([data1, data2])) data1_loader = torch.utils.data.DataLoader(data1_set, batch_size=args.batch_size, shuffle=True) data2_loader = torch.utils.data.DataLoader(data2_set, batch_size=args.batch_size, shuffle=True) total_loader = torch.utils.data.DataLoader(total_set, batch_size=args.batch_size, shuffle=True) l_h = np.linspace(0.2, 1., 20) if args.model == "KDE": opt_h1 = find_optimal_bandwidth(data1, l_h, lik=False, gpu=gpu) opt_h2 = find_optimal_bandwidth(data2, l_h, lik=False, gpu=gpu) opt_h = (opt_h1 + opt_h2) / 2 model = KernelRatioNaive(h=opt_h, gpu=gpu) model.eps = 1e-100 model.fit(data1, data2) print(f"KL divergence calculated by KDE: {model.kl()}") elif args.model == "based": opt_h1 = find_optimal_bandwidth(data1[:int(len(data1)/4)], l_h, gpu=gpu) opt_h2 = find_optimal_bandwidth(data2[:int(len(data2)/4)], l_h, gpu=gpu) opt_h = (opt_h1 + opt_h2) / 2 med_dist1 = np.median(pdist(data1)) med_dist2 = np.median(pdist(data2)) med_dist = (med_dist1 + med_dist2) / 2 model = KernelRatioGaussian(grid_sample=3000, solver='para', para_h=med_dist, para_l=0.1, h=opt_h, gpu=gpu, kmeans=False, einsum_batch=100, reg=0.1, stabilize=True, online=True) model.eps = 1e-100 model.fit(data1, data2) print(f"KL divergence calculated by VWKDE model based: {model.kl()}") elif args.model == "free": score_model_p1 = Energy(net=Score_network(input_dim=args.dim, units=[300,300], dropout=True)).to(args.device) score_model_p2 = Energy(net=Score_network(input_dim=args.dim, units=[300,300], dropout=True)).to(args.device) optimizer_sp1 = torch.optim.Adam(score_model_p1.parameters(), lr=1e-4) optimizer_sp2 = torch.optim.Adam(score_model_p2.parameters(), lr=1e-4) print("Train score models for p1 and p2") for epoch in range(args.score_epoch): loss1 = 0 loss2 = 0 for x in data1_loader: x = x[0].to(args.device) loss = sliced_VR_score_matching(score_model_p1, x) optimizer_sp1.zero_grad() loss.backward() optimizer_sp1.step() loss1 += loss.item() / len(data1_loader) for x in data2_loader: x = x[0].to(args.device) loss = sliced_VR_score_matching(score_model_p2, x) optimizer_sp2.zero_grad() loss.backward() 
optimizer_sp2.step() loss2 += loss.item() / len(data2_loader) if epoch % 100 == 99: print(f"Epoch: {epoch+1} | Loss1: {loss1}") print(f"Epoch: {epoch+1} | Loss2: {loss2}") score_model_p1.eval() score_model_p2.eval() p1_laplacian = Laplacian(score_model_p1) p2_laplacian = Laplacian(score_model_p2) weight_model = Energy(net=Weight_network(input_dim=args.dim, units=[128,128,128,64], dropout=False)).to(args.device) optimizer_w = torch.optim.Adam(weight_model.parameters(), lr=1e-3) print("Train a weight model") for epoch in range(args.weight_epoch): total_loss = 0 for x in total_loader: x = x[0].to(args.device).requires_grad_() output = weight_model(x) output_gradient = torch.autograd.grad( outputs=output, inputs=x, grad_outputs=torch.ones_like(output), create_graph=True, only_inputs=True )[0] log_p1 = score_model_p1.minus_forward(x) grad_logp1 = torch.autograd.grad( outputs=log_p1.view(-1, 1), inputs=x, grad_outputs=torch.ones_like(output), create_graph=True, only_inputs=True )[0] log_p2 = score_model_p2.minus_forward(x) grad_logp2 = torch.autograd.grad( outputs=log_p2.view(-1, 1), inputs=x, grad_outputs=torch.ones_like(output), create_graph=True, only_inputs=True )[0] lp_p1 = p1_laplacian.get_laplacian(x) - (grad_logp1**2).sum(1) lp_p2 = p2_laplacian.get_laplacian(x) - (grad_logp2**2).sum(1) loss = ((((output_gradient)*(-grad_logp1+grad_logp2)).sum(1) + 0.5*(lp_p1-lp_p2))**2).mean() optimizer_w.zero_grad() loss.backward() optimizer_w.step() total_loss += loss.item() / len(total_loader) if epoch % 100 == 99: print(f"Epoch: {epoch+1} | Loss: {total_loss}") weight_model.eval() opt_h1 = find_optimal_bandwidth(data1[:int(len(data1)/4)], l_h, gpu=gpu) opt_h2 = find_optimal_bandwidth(data2[:int(len(data2)/4)], l_h, gpu=gpu) opt_h = (opt_h1 + opt_h2) / 2 logWeights1 = weight_model(data1.to(args.device)).detach().cpu().numpy() logWeights2 = weight_model(data2.to(args.device)).detach().cpu().numpy()
kl_model = KernelRatioAlpha(opt_h, gpu=gpu)
2
2023-10-27 04:47:03+00:00
16k
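The train.py record above prints a closed-form reference value TKL for the KL divergence between the two sampling Gaussians before running any estimator. A minimal standalone version of that formula is sketched below so the expression is easier to read in isolation; the function name gaussian_kl is illustrative, and the example reuses the script's default setup (dim=20, identity covariances, means offset by sqrt(2) in the first coordinate), for which the true KL is exactly 1.

import numpy as np

def gaussian_kl(mean1, Cov1, mean2, Cov2):
    # KL(N(mean1, Cov1) || N(mean2, Cov2)), matching the TKL expression in train.py:
    # 0.5 * (tr(Cov2^-1 Cov1) + (mean2 - mean1)^T Cov2^-1 (mean2 - mean1)
    #        - d + log(det(Cov2) / det(Cov1)))
    d = mean1.shape[0]
    Cov2_inv = np.linalg.inv(Cov2)
    diff = mean2 - mean1
    return 0.5 * (np.trace(Cov2_inv @ Cov1) + diff @ Cov2_inv @ diff - d
                  + np.log(np.linalg.det(Cov2) / np.linalg.det(Cov1)))

dim = 20
mean1 = np.zeros(dim)
mean2 = np.zeros(dim)
mean2[0] = np.sqrt(2.0)
print(gaussian_kl(mean1, np.eye(dim), mean2, np.eye(dim)))  # 1.0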
Sllambias/yucca
yucca/deprecated/YuccaPreprocessor_MultiTask.py
[ { "identifier": "YuccaPreprocessor", "path": "yucca/preprocessing/YuccaPreprocessor.py", "snippet": "class YuccaPreprocessor(object):\n \"\"\"\n The YuccaPreprocessor class is designed to preprocess medical images for the Yucca project.\n It implements various preprocessing steps, such as reorientation, cropping, normalization, and resizing,\n based on the plans specified in an YuccaPlanner.\n\n For training the _preprocess_train_subject method prepares input images for the Yucca model.\n The preprocess_case_for_inference method prepares input images for the Yucca model during the inference phase,\n ensuring that they match the requirements specified during training.\n The reverse_preprocessing method is then used to revert the processed images back to their original form,\n allowing for a meaningful interpretation of the model's predictions.\n These methods collectively provide a consistent and reversible preprocessing pipeline for both training and inference.\n\n The operations that can be enabled/defined in the YuccaPlanner and carried out by the\n YuccaPreprocessor are:\n\n (1) The starting orientation - defaults to RAS (for medical images).\n (2) The cropping operation - defaults to crop to nonzero bounding box\n (3) The Transposition operation (along with the reverse transpose operation,\n to be used during inference) - defaults to no transposition if image dimensions and spacings\n are not too anisotropic.\n (4) The Resample operation - defaults to resampling to the median spacing of the dataset.\n (5) The Normalization operation - defaults to standardization = (image - mean) / std\n per modality to preserve ranges to account for CT pixel values representing specific physical\n attributes.\n\n Additionally it carries out a number of tests and analyzes each image for foreground locations\n which is used later to oversample foreground.\n \"\"\"\n\n def __init__(self, plans_path, task=None, threads=12, disable_sanity_checks=False):\n self.name = str(self.__class__.__name__)\n self.task = task\n self.plans_path = plans_path\n self.plans = self.load_plans(plans_path)\n self.threads = threads\n self.disable_sanity_checks = disable_sanity_checks\n\n # lists for information we would like to attain\n self.transpose_forward = []\n self.transpose_backward = []\n self.target_spacing = []\n\n def initialize_paths(self):\n self.target_dir = join(yucca_preprocessed_data, self.task, self.plans[\"plans_name\"])\n self.input_dir = join(yucca_raw_data, self.task)\n self.imagepaths = subfiles(join(self.input_dir, \"imagesTr\"), suffix=self.image_extension)\n self.subject_ids = [\n file for file in subfiles(join(self.input_dir, \"labelsTr\"), join=False) if not file.startswith(\".\")\n ]\n\n def initialize_properties(self):\n \"\"\"\n here we basically set up things that are needed for preprocessing during training,\n but that aren't necessary during inference\n \"\"\"\n self.dataset_properties = self.plans[\"dataset_properties\"]\n self.intensities = self.dataset_properties[\"intensities\"]\n self.image_extension = self.dataset_properties.get(\"image_extension\") or \"nii.gz\"\n\n # op values\n self.transpose_forward = np.array(self.plans[\"transpose_forward\"], dtype=int)\n self.transpose_backward = np.array(self.plans[\"transpose_backward\"], dtype=int)\n self.target_spacing = np.array(self.plans[\"target_spacing\"], dtype=float)\n\n @staticmethod\n def load_plans(plans_path):\n if os.path.splitext(plans_path)[-1] == \".json\":\n return load_json(plans_path)\n if os.path.splitext(plans_path)[-1] == 
\".yaml\":\n return load_yaml(plans_path)[\"config\"][\"plans\"]\n else:\n raise FileNotFoundError(\n f\"Plan file not found. Got {plans_path} with ext {os.path.splitext(plans_path)[-1]}. Expects either a '.json' or '.yaml' file.\"\n )\n\n def run(self):\n self.initialize_properties()\n self.initialize_paths()\n maybe_mkdir_p(self.target_dir)\n\n logging.info(\n f\"{'Preprocessing Task:':25.25} {self.task} \\n\"\n f\"{'Using Planner:':25.25} {self.plans_path} \\n\"\n f\"{'Crop to nonzero:':25.25} {self.plans['crop_to_nonzero']} \\n\"\n f\"{'Normalization scheme:':25.25} {self.plans['normalization_scheme']} \\n\"\n f\"{'Transpose Forward:':25.25} {self.transpose_forward} \\n\"\n f\"{'Transpose Backward:':25.25} {self.transpose_backward} \\n\"\n )\n p = Pool(self.threads)\n\n p.map(self._preprocess_train_subject, self.subject_ids)\n p.close()\n p.join()\n\n def _preprocess_train_subject(self, subject_id):\n \"\"\"\n This is the bread and butter of the preprocessor.\n The following steps are taken:\n\n (1) Load Images:\n Extract relevant image files associated with the given subject_id.\n Load the images using the nibabel library.\n\n (2) Reorientation (Optional):\n Check if valid qform or sform codes are present in the header.\n If valid, reorient the images to the target orientation specified in the plans.\n Update the original and new orientation information in the image_props dictionary.\n\n (3) Normalization and Transposition:\n Normalize each image based on the specified normalization scheme and intensities.\n Transpose the images according to the forward transpose axes specified in the plans.\n\n (4) Cropping (Optional):\n If the crop_to_nonzero option is enabled in the plans, crop the images to the nonzero bounding box.\n Update the image_props dictionary with cropping information.\n\n (5) Resampling:\n Resample images to the target spacing specified in the plans.\n Update the image_props dictionary with original and new spacing information.\n\n (6) Foreground Locations:\n Extract some locations of the foreground, which will be used in oversampling of foreground classes.\n Determine the number and sizes of connected components in the ground truth label (can be used in analysis).\n\n (7) Save Preprocessed Data:\n Stack the preprocessed images and label.\n Save the preprocessed data as a NumPy array in a .npy file.\n Save relevant metadata as a .pkl file.\n\n (8) Print Information:\n Print information about the size and spacing before and after preprocessing.\n Print the path where the preprocessed data is saved.\n \"\"\"\n image_props = {}\n subject_id = subject_id.split(os.extsep, 1)[0]\n logging.info(f\"Preprocessing: {subject_id}\")\n arraypath = join(self.target_dir, subject_id + \".npy\")\n picklepath = join(self.target_dir, subject_id + \".pkl\")\n\n if isfile(arraypath) and isfile(picklepath):\n logging.info(f\"Case: {subject_id} already exists. 
Skipping.\")\n return\n # First find relevant images by their paths and save them in the image property pickle\n # Then load them as images\n # The '_' in the end is to avoid treating Case_4_000 AND Case_42_000 as different versions\n # of the label named Case_4 as both would start with \"Case_4\", however only the correct one is\n # followed by an underscore\n imagepaths = [impath for impath in self.imagepaths if os.path.split(impath)[-1].startswith(subject_id + \"_\")]\n image_props[\"image files\"] = imagepaths\n images = [nib.load(image) for image in imagepaths]\n\n # Do the same with label\n label = join(self.input_dir, \"labelsTr\", subject_id + \".nii.gz\")\n image_props[\"label file\"] = label\n label = nib.load(label)\n\n if not self.disable_sanity_checks:\n assert len(images) > 0, f\"found no images for {subject_id + '_'}, \" f\"attempted imagepaths: {imagepaths}\"\n\n assert (\n len(images[0].shape) == self.plans[\"dataset_properties\"][\"data_dimensions\"]\n ), f\"image should be shape (x, y(, z)) but is {images[0].shape}\"\n\n # make sure images and labels are correctly registered\n assert images[0].shape == label.shape, (\n f\"Sizes do not match for {subject_id}\" f\"Image is: {images[0].shape} while the label is {label.shape}\"\n )\n\n assert np.allclose(get_nib_spacing(images[0]), get_nib_spacing(label)), (\n f\"Spacings do not match for {subject_id}\"\n f\"Image is: {get_nib_spacing(images[0])} while the label is {get_nib_spacing(label)}\"\n )\n\n assert get_nib_orientation(images[0]) == get_nib_orientation(label), (\n f\"Directions do not match for {subject_id}\"\n f\"Image is: {get_nib_orientation(images[0])} while the label is {get_nib_orientation(label)}\"\n )\n\n # Make sure all modalities are correctly registered\n if len(images) > 1:\n for image in images:\n assert images[0].shape == image.shape, (\n f\"Sizes do not match for {subject_id}\" f\"One is: {images[0].shape} while another is {image.shape}\"\n )\n\n assert np.allclose(get_nib_spacing(images[0]), get_nib_spacing(image)), (\n f\"Spacings do not match for {subject_id}\"\n f\"One is: {get_nib_spacing(images[0])} while another is {get_nib_spacing(image)}\"\n )\n\n assert get_nib_orientation(images[0]) == get_nib_orientation(image), (\n f\"Directions do not match for {subject_id}\"\n f\"One is: {get_nib_orientation(images[0])} while another is {get_nib_orientation(image)}\"\n )\n\n original_spacing = get_nib_spacing(images[0])\n original_size = np.array(images[0].shape)\n\n if self.target_spacing.size:\n target_spacing = self.target_spacing\n else:\n target_spacing = original_spacing\n\n # If qform and sform are both missing the header is corrupt and we do not trust the\n # direction from the affine\n # Make sure you know what you're doing\n if images[0].get_qform(coded=True)[1] or images[0].get_sform(coded=True)[1]:\n original_orientation = get_nib_orientation(images[0])\n final_direction = self.plans[\"target_coordinate_system\"]\n images = [reorient_nib_image(image, original_orientation, final_direction) for image in images]\n label = reorient_nib_image(label, original_orientation, final_direction)\n else:\n original_orientation = \"INVALID\"\n final_direction = \"INVALID\"\n\n images = [nifti_or_np_to_np(image) for image in images]\n label = nifti_or_np_to_np(label)\n\n # Check if the ground truth only contains expected values\n expected_labels = np.array(self.plans[\"dataset_properties\"][\"classes\"], dtype=np.float32)\n actual_labels = np.unique(label).astype(np.float32)\n assert 
np.all(np.isin(actual_labels, expected_labels)), (\n f\"Unexpected labels found for {subject_id} \\n\" f\"expected: {expected_labels} \\n\" f\"found: {actual_labels}\"\n )\n\n # Cropping is performed to save computational resources. We are only removing background.\n if self.plans[\"crop_to_nonzero\"]:\n nonzero_box = get_bbox_for_foreground(images[0], background_label=0)\n image_props[\"crop_to_nonzero\"] = nonzero_box\n for i in range(len(images)):\n images[i] = crop_to_box(images[i], nonzero_box)\n label = crop_to_box(label, nonzero_box)\n else:\n image_props[\"crop_to_nonzero\"] = self.plans[\"crop_to_nonzero\"]\n\n images, label = self._resample_and_normalize_case(\n images,\n label,\n self.plans[\"normalization_scheme\"],\n self.transpose_forward,\n original_spacing,\n target_spacing,\n )\n\n # Stack and fix dimensions\n images = np.vstack((np.array(images), np.array(label)[np.newaxis]))\n\n # now AFTER transposition etc., we get some (no need to get all)\n # locations of foreground, that we will later use in the\n # oversampling of foreground classes\n foreground_locs = np.array(np.nonzero(images[-1])).T[::10]\n numbered_ground_truth, ground_truth_numb_lesion = cc3d.connected_components(images[-1], connectivity=26, return_N=True)\n if ground_truth_numb_lesion == 0:\n object_sizes = 0\n else:\n object_sizes = [i * np.prod(target_spacing) for i in np.unique(numbered_ground_truth, return_counts=True)[-1][1:]]\n\n final_size = list(images[0].shape)\n\n # save relevant values\n image_props[\"original_spacing\"] = original_spacing\n image_props[\"original_size\"] = original_size\n image_props[\"original_orientation\"] = original_orientation\n image_props[\"new_spacing\"] = target_spacing[self.transpose_forward].tolist()\n image_props[\"new_size\"] = final_size\n image_props[\"new_direction\"] = final_direction\n image_props[\"foreground_locations\"] = foreground_locs\n image_props[\"n_cc\"] = ground_truth_numb_lesion\n image_props[\"size_cc\"] = object_sizes\n\n logging.info(\n f\"size before: {original_size} size after: {image_props['new_size']} \\n\"\n f\"spacing before: {original_spacing} spacing after: {image_props['new_spacing']} \\n\"\n f\"Saving {subject_id} in {arraypath} \\n\"\n )\n\n # save the image\n np.save(arraypath, images)\n\n # save metadata as .pkl\n save_pickle(image_props, picklepath)\n\n def _resample_and_normalize_case(\n self,\n images: list,\n label: np.ndarray = None,\n norm_op=None,\n transpose=None,\n original_spacing=None,\n target_spacing=None,\n ):\n # Normalize and Transpose images to target view.\n # Transpose labels to target view.\n assert len(images) == len(norm_op) == len(self.intensities), (\n \"number of images, \"\n \"normalization operations and intensities does not match. \\n\"\n f\"len(images) == {len(images)} \\n\"\n f\"len(norm_op) == {len(norm_op)} \\n\"\n f\"len(self.intensities) == {len(self.intensities)} \\n\"\n )\n\n for i in range(len(images)):\n image = images[i]\n assert image is not None\n\n images[i] = normalizer(image, scheme=norm_op[i], intensities=self.intensities[i])\n assert len(images[i].shape) == len(transpose), (\n \"image and transpose axes do not match. 
\\n\"\n f\"images[i].shape == {images[i].shape} \\n\"\n f\"transpose == {transpose} \\n\"\n f\"len(images[i].shape) == {len(images[i]).shape} \\n\"\n f\"len(transpose) == {len(transpose)} \\n\"\n )\n images[i] = images[i].transpose(transpose)\n logging.info(f\"Normalized with: {norm_op[0]} \\n\" f\"Transposed with: {transpose}\")\n\n shape_t = images[0].shape\n original_spacing_t = original_spacing[transpose]\n target_spacing_t = target_spacing[transpose]\n\n # Find new shape based on the target spacing\n target_shape = np.round((original_spacing_t / target_spacing_t).astype(float) * shape_t).astype(int)\n\n # Resample to target shape and spacing\n for i in range(len(images)):\n try:\n images[i] = resize(images[i], output_shape=target_shape, order=3)\n except OverflowError:\n logging.error(\"Unexpected values in either shape or image for resize\")\n if label is not None:\n label = label.transpose(transpose)\n try:\n label = resize(label, output_shape=target_shape, order=0, anti_aliasing=False)\n except OverflowError:\n logging.error(\"Unexpected values in either shape or label for resize\")\n return images, label\n\n return images\n\n def preprocess_case_for_inference(self, images: list | tuple, patch_size: tuple):\n \"\"\"\n Will reorient ONLY if we have valid qform or sform codes.\n with coded=True the methods will return {affine or None} and {0 or 1}.\n If both are 0 we cannot rely on headers for orientations and will\n instead assume images are in the desired orientation already.\n\n Afterwards images will be normalized and transposed as specified by the\n plans file also used in training.\n\n Finally images are resampled to the required spacing/size and returned\n as torch tensors of the required shape (b, c, x, y, (z))\n \"\"\"\n assert isinstance(images, (list, tuple)), \"image(s) should be a list or tuple, even if only one \" \"image is passed\"\n self.initialize_properties()\n image_properties = {}\n ext = images[0][0].split(os.extsep, 1)[1] if isinstance(images[0], tuple) else images[0].split(os.extsep, 1)[1]\n images = [\n read_file_to_nifti_or_np(image[0]) if isinstance(image, tuple) else read_file_to_nifti_or_np(image)\n for image in images\n ]\n\n image_properties[\"image_extension\"] = ext\n image_properties[\"original_shape\"] = np.array(images[0].shape)\n\n assert len(image_properties[\"original_shape\"]) in [\n 2,\n 3,\n ], \"images must be either 2D or 3D for preprocessing\"\n\n image_properties[\"original_spacing\"] = np.array([1.0] * len(image_properties[\"original_shape\"]))\n image_properties[\"qform\"] = None\n image_properties[\"sform\"] = None\n image_properties[\"reoriented\"] = False\n image_properties[\"affine\"] = None\n\n if isinstance(images[0], nib.Nifti1Image):\n image_properties[\"original_spacing\"] = get_nib_spacing(images[0])\n image_properties[\"qform\"] = images[0].get_qform()\n image_properties[\"sform\"] = images[0].get_sform()\n # Check if header is valid and then attempt to orient to target orientation.\n if (\n images[0].get_qform(coded=True)[1]\n or images[0].get_sform(coded=True)[1]\n and self.plans.get(\"target_coordinate_system\")\n ):\n image_properties[\"reoriented\"] = True\n original_orientation = get_nib_orientation(images[0])\n image_properties[\"original_orientation\"] = original_orientation\n images = [\n reorient_nib_image(image, original_orientation, self.plans[\"target_coordinate_system\"]) for image in images\n ]\n image_properties[\"new_orientation\"] = get_nib_orientation(images[0])\n image_properties[\"affine\"] = 
images[0].affine\n\n        images = [nifti_or_np_to_np(image) for image in images]\n\n        image_properties[\"uncropped_shape\"] = np.array(images[0].shape)\n\n        if self.plans[\"crop_to_nonzero\"]:\n            nonzero_box = get_bbox_for_foreground(images[0], background_label=0)\n            for i in range(len(images)):\n                images[i] = crop_to_box(images[i], nonzero_box)\n            image_properties[\"nonzero_box\"] = nonzero_box\n\n        image_properties[\"cropped_shape\"] = np.array(images[0].shape)\n\n        images = self._resample_and_normalize_case(\n            images,\n            norm_op=self.plans[\"normalization_scheme\"],\n            transpose=self.transpose_forward,\n            original_spacing=image_properties[\"original_spacing\"],\n            target_spacing=self.target_spacing,\n        )\n\n        # From this point images are shape (1, c, x, y, z)\n        image_properties[\"resampled_transposed_shape\"] = np.array(images[0].shape)\n\n        for i in range(len(images)):\n            images[i], padding = pad_to_size(images[i], patch_size)\n        image_properties[\"padded_shape\"] = np.array(images[0].shape)\n        image_properties[\"padding\"] = padding\n\n        # Stack and fix dimensions\n        images = np.stack(images)[np.newaxis]\n\n        return torch.tensor(images, dtype=torch.float32), image_properties\n\n    def reverse_preprocessing(self, images: torch.Tensor, image_properties: dict):\n        \"\"\"\n        Expected shape of images are:\n        (b, c, x, y(, z))\n\n        (1) Initialization: Extract relevant properties from the image_properties dictionary.\n        (2) Padding Reversion: Reverse the padding applied during preprocessing.\n        (3) Resampling and Transposition Reversion: Resize the images to revert the resampling operation.\n        Transpose the images back to the original orientation.\n        (4) Cropping Reversion (Optional): If cropping to the nonzero bounding box was applied, revert the cropping operation.\n        (5) Return: Return the reverted images as a NumPy array.\n        The original orientation of the image will be re-applied when saving the prediction\n        \"\"\"\n        image_properties[\"save_format\"] = image_properties.get(\"image_extension\")\n        nclasses = max(1, len(self.plans[\"dataset_properties\"][\"classes\"]))\n        canvas = torch.zeros((1, nclasses, *image_properties[\"uncropped_shape\"]), dtype=images.dtype)\n        shape_after_crop = image_properties[\"cropped_shape\"]\n        shape_after_crop_transposed = shape_after_crop[self.transpose_forward]\n        pad = image_properties[\"padding\"]\n\n        assert np.all(images.shape[2:] == 
image_properties[\"padded_shape\"]), (\n f\"Reversing padding: \"\n f\"image should be of shape: {image_properties['padded_shape']}\"\n f\"but is: {images.shape[2:]}\"\n )\n shape = images.shape[2:]\n if len(pad) == 6:\n images = images[\n :,\n :,\n pad[0] : shape[0] - pad[1],\n pad[2] : shape[1] - pad[3],\n pad[4] : shape[2] - pad[5],\n ]\n elif len(pad) == 4:\n images = images[:, :, pad[0] : shape[0] - pad[1], pad[2] : shape[1] - pad[3]]\n\n assert np.all(images.shape[2:] == image_properties[\"resampled_transposed_shape\"]), (\n f\"Reversing resampling and tranposition: \"\n f\"image should be of shape: {image_properties['resampled_transposed_shape']}\"\n f\"but is: {images.shape[2:]}\"\n )\n # Here we Interpolate the array to the original size. The shape starts as [H, W (,D)]. For Torch functionality it is changed to [B, C, H, W (,D)].\n # Afterwards it's squeezed back into [H, W (,D)] and transposed to the original direction.\n images = F.interpolate(images, size=shape_after_crop_transposed.tolist(), mode=\"trilinear\").permute(\n [0, 1] + [i + 2 for i in self.transpose_backward]\n )\n\n assert np.all(images.shape[2:] == image_properties[\"cropped_shape\"]), (\n f\"Reversing cropping: \"\n f\"image should be of shape: {image_properties['cropped_shape']}\"\n f\"but is: {images.shape[2:]}\"\n )\n assert np.all(images.shape[2:] == image_properties[\"resampled_transposed_shape\"]), (\n f\"Reversing resampling and tranposition: \"\n f\"image should be of shape: {image_properties['resampled_transposed_shape']}\"\n f\"but is: {images.shape[2:]}\"\n )\n # Here we Interpolate the array to the original size. The shape starts as [H, W (,D)]. For Torch functionality it is changed to [B, C, H, W (,D)].\n # Afterwards it's squeezed back into [H, W (,D)] and transposed to the original direction.\n images = F.interpolate(images, size=shape_after_crop_transposed.tolist(), mode=\"trilinear\").permute(\n [0, 1] + [i + 2 for i in self.transpose_backward]\n )\n\n assert np.all(images.shape[2:] == image_properties[\"cropped_shape\"]), (\n f\"Reversing cropping: \"\n f\"image should be of shape: {image_properties['cropped_shape']}\"\n f\"but is: {images.shape[2:]}\"\n )\n assert np.all(images.shape[2:] == image_properties[\"resampled_transposed_shape\"]), (\n f\"Reversing resampling and tranposition: \"\n f\"image should be of shape: {image_properties['resampled_transposed_shape']}\"\n f\"but is: {images.shape[2:]}\"\n )\n # Here we Interpolate the array to the original size. The shape starts as [H, W (,D)]. 
For Torch functionality it is changed to [B, C, H, W (,D)].\n # Afterwards it's squeezed back into [H, W (,D)] and transposed to the original direction.\n images = F.interpolate(images, size=shape_after_crop_transposed.tolist(), mode=\"trilinear\").permute(\n [0, 1] + [i + 2 for i in self.transpose_backward]\n )\n\n assert np.all(images.shape[2:] == image_properties[\"cropped_shape\"]), (\n f\"Reversing cropping: \"\n f\"image should be of shape: {image_properties['cropped_shape']}\"\n f\"but is: {images.shape[2:]}\"\n )\n\n if self.plans[\"crop_to_nonzero\"]:\n bbox = image_properties[\"nonzero_box\"]\n slices = [\n slice(None),\n slice(None),\n slice(bbox[0], bbox[1]),\n slice(bbox[2], bbox[3]),\n ]\n if len(bbox) == 6:\n slices.append(\n slice(bbox[4], bbox[5]),\n )\n canvas[slices] = images\n else:\n canvas = images\n return canvas.numpy(), image_properties" }, { "identifier": "yucca_preprocessed_data", "path": "yucca/paths.py", "snippet": "" }, { "identifier": "normalizer", "path": "yucca/preprocessing/normalization.py", "snippet": "def normalizer(array: np.ndarray, scheme: str, intensities: {}):\n \"\"\"\n Normalizing function for preprocessing and inference.\n\n supported schemes can be either:\n None = for no normalization. Generally not recommended.\n MinMax = for 0-1 or Min-Max normalization.\n Standardize = (array - mean) / std. Based on modality wide stats.\n Clip = for contrast clipping. This will clip values to the 0.01 and 99.99th percentiles\n and then perform 0-1 normalization.\n \"\"\"\n accepted_schemes = [\"clipping\", \"minmax\", \"no_norm\", \"standardize\", \"volume_wise_znorm\"]\n\n assert scheme in accepted_schemes, \"invalid normalization scheme inserted\" f\"attempted scheme: {scheme}\"\n assert array is not None\n\n if scheme == \"no_norm\":\n return array\n\n elif scheme == \"minmax\":\n assert intensities is not None, \"ERROR: dataset wide stats are required for minmax\"\n return (array - intensities[\"min\"]) / (intensities[\"max\"] - intensities[\"min\"])\n\n elif scheme == \"standardize\":\n assert intensities is not None, \"ERROR: dataset wide stats are required for standardize\"\n return (array - float(intensities[\"mean\"])) / float(intensities[\"std\"])\n\n elif scheme == \"clip\":\n lower_bound, upper_bound = np.percentile(array, (0.01, 99.99))\n array = exposure.rescale_intensity(array, in_range=(lower_bound, upper_bound), out_range=(0, 1))\n return array\n\n elif scheme == \"volume_wise_znorm\":\n empty_val = array.min() # We assume the background is the minimum value\n\n if empty_val != array[0, 0, 0]:\n warnings.warn(\n \"Tried to normalize an array where the top right value was not the same as the minimum value.\"\n f\"empty_val: {empty_val}, top right: {array[0, 0, 0]}\"\n )\n mask = array != empty_val\n array = clamp(array, mask=mask)\n array = znormalize(array, mask=mask)\n array = rescale(array, range=(0, 1))\n return array" }, { "identifier": "get_nib_spacing", "path": "yucca/utils/nib_utils.py", "snippet": "def get_nib_spacing(nib_image: nib.Nifti1Image) -> np.ndarray:\n return np.array(nib_image.header.get_zooms())" }, { "identifier": "get_nib_orientation", "path": "yucca/utils/nib_utils.py", "snippet": "def get_nib_orientation(nib_image: nib.Nifti1Image) -> str:\n affine = nib_image.affine\n return \"\".join(nio.aff2axcodes(affine))" }, { "identifier": "reorient_nib_image", "path": "yucca/utils/nib_utils.py", "snippet": "def reorient_nib_image(nib_image, original_orientation: str, target_orientation: str) -> np.ndarray:\n # The reason we 
don't use the affine information to get original_orientation is that it can be\n # incorrect. Therefore it can be manually specified. In the cases where header can be trusted,\n # Just use get_nib_orientation to get the original_orientation.\n if original_orientation == target_orientation:\n return nib_image\n start = nio.axcodes2ornt(original_orientation)\n end = nio.axcodes2ornt(target_orientation)\n orientation = nio.ornt_transform(start, end)\n return nib_image.as_reoriented(orientation)" }, { "identifier": "nifti_or_np_to_np", "path": "yucca/utils/type_conversions.py", "snippet": "def nifti_or_np_to_np(array: Union[np.ndarray, nib.Nifti1Image]) -> np.ndarray:\n if isinstance(array, np.ndarray):\n return array\n if isinstance(array, nib.Nifti1Image):\n return array.get_fdata().astype(np.float32)\n else:\n raise TypeError(f\"File data type invalid. Found: {type(array)} and expected nib.Nifti1Image or np.ndarray\")" }, { "identifier": "get_bbox_for_foreground", "path": "yucca/image_processing/objects/BoundingBox.py", "snippet": "def get_bbox_for_foreground(array, background_label=0):\n array = deepcopy(array)\n array[array != background_label] = 1\n return get_bbox_for_label(array, label=1)" }, { "identifier": "crop_to_box", "path": "yucca/image_processing/cropping_and_padding.py", "snippet": "def crop_to_box(array, bbox):\n \"\"\"\n Crops a 3D array to the Bounding Box indices\n Should be a list of [xmin, xmax, ymin, ymax (, zmin, zmax)]\n \"\"\"\n if len(bbox) > 5:\n bbox_slices = (\n slice(bbox[0], bbox[1]),\n slice(bbox[2], bbox[3]),\n slice(bbox[4], bbox[5]),\n )\n else:\n bbox_slices = (slice(bbox[0], bbox[1]), slice(bbox[2], bbox[3]))\n return array[bbox_slices]" }, { "identifier": "pad_to_size", "path": "yucca/image_processing/cropping_and_padding.py", "snippet": "def pad_to_size(array, size):\n pad_box = get_pad_box(array, size)\n if len(pad_box) > 5:\n array_padded = np.pad(\n array,\n (\n (pad_box[0], pad_box[1]),\n (pad_box[2], pad_box[3]),\n (pad_box[4], pad_box[5]),\n ),\n mode=\"edge\",\n )\n return array_padded, pad_box\n\n array_padded = np.pad(array, ((pad_box[0], pad_box[1]), (pad_box[2], pad_box[3])), mode=\"edge\")\n return array_padded, pad_box" } ]
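The context snippets above (normalizer, crop_to_box, pad_to_size and the spacing helpers) all feed one resampling rule: the new voxel grid is chosen so the physical extent of the image stays constant. A minimal sketch of that arithmetic and of the "minmax" normalization scheme, using made-up spacing, shape and intensity values rather than anything taken from this record:

import numpy as np

# Hypothetical values; in the pipeline they come from the NIfTI header and the plans file.
original_spacing = np.array([1.0, 1.0, 3.0])   # mm per voxel before resampling
target_spacing = np.array([1.0, 1.0, 1.0])     # mm per voxel requested by the plans
original_shape = np.array([256, 256, 40])

# Same formula as _resample_and_normalize_case: keep the physical extent constant.
target_shape = np.round((original_spacing / target_spacing).astype(float) * original_shape).astype(int)
# -> array([256, 256, 120]): 40 slices at 3 mm become 120 slices at 1 mm.

# Min-max normalization as described by the "minmax" scheme of normalizer().
intensities = {"min": 0.0, "max": 2000.0}      # dataset-wide statistics (hypothetical)
image = np.random.default_rng(0).uniform(0.0, 2000.0, size=(4, 4, 4))
image_norm = (image - intensities["min"]) / (intensities["max"] - intensities["min"])
assert image_norm.min() >= 0.0 and image_norm.max() <= 1.0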
import numpy as np import torch import nibabel as nib import os import cc3d from yucca.preprocessing.YuccaPreprocessor import YuccaPreprocessor from yucca.paths import yucca_preprocessed_data, yucca_raw_data from yucca.preprocessing.normalization import normalizer from yucca.utils.nib_utils import get_nib_spacing, get_nib_orientation, reorient_nib_image from yucca.utils.type_conversions import nifti_or_np_to_np from yucca.image_processing.objects.BoundingBox import get_bbox_for_foreground from yucca.image_processing.cropping_and_padding import crop_to_box, pad_to_size from multiprocessing import Pool from skimage.transform import resize from batchgenerators.utilities.file_and_folder_operations import ( join, load_json, subfiles, save_pickle, maybe_mkdir_p, isfile, subdirs, )
11295
np.save(arraypath, images) # save metadata as .pkl save_pickle(image_props, picklepath) def _resample_and_normalize_case( self, images: list, seg: np.ndarray = None, norm_op=None, transpose=None, original_spacing=None, target_spacing=None ): # Normalize and Transpose images to target view. # Transpose segmentations to target view. assert len(images) == len(norm_op) == len(self.intensities), ( "number of images, " "normalization operations and intensities does not match. \n" f"len(images) == {len(images)} \n" f"len(norm_op) == {len(norm_op)} \n" f"len(self.intensities) == {len(self.intensities)} \n" ) for i in range(len(images)): images[i] = normalizer(images[i], scheme=norm_op[i], intensities=self.intensities[i]) assert len(images[i].shape) == len(transpose), ( "image and transpose axes do not match. \n" f"images[i].shape == {images[i].shape} \n" f"transpose == {transpose} \n" f"len(images[i].shape) == {len(images[i]).shape} \n" f"len(transpose) == {len(transpose)} \n" ) images[i] = images[i].transpose(transpose) print(f"Normalized with: {norm_op[0]} \n" f"Transposed with: {transpose}") shape_t = images[0].shape original_spacing_t = original_spacing[transpose] target_spacing_t = target_spacing[transpose] # Find new shape based on the target spacing target_shape = np.round((original_spacing_t / target_spacing_t).astype(float) * shape_t).astype(int) # Resample to target shape and spacing for i in range(len(images)): try: images[i] = resize(images[i], output_shape=target_shape, order=3) except OverflowError: print("Unexpected values in either shape or image for resize") if seg is not None: seg = seg.transpose(transpose) try: seg = resize(seg, output_shape=target_shape, order=0, anti_aliasing=False) except OverflowError: print("Unexpected values in either shape or seg for resize") return images, seg return images def preprocess_case_for_inference(self, images: list, patch_size: tuple, do_tta=False): """ Will reorient ONLY if we have valid qform or sform codes. with coded=True the methods will return {affine or None} and {0 or 1}. If both are 0 we cannot rely on headers for orientations and will instead assume images are in the desired orientation already. Afterwards images will be normalized and transposed as specified by the plans file also used in training. Finally images are resampled to the required spacing/size and returned as torch tensors of the required shape (b, c, x, y, (z)) """ assert isinstance(images, list), "image(s) should be a list, even if only one " "image is passed" self.initialize_properties() image_properties = {} images = [nib.load(image) for image in images] image_properties["original_spacing"] = get_nib_spacing(images[0]) image_properties["original_shape"] = np.array(images[0].shape) image_properties["qform"] = images[0].get_qform() image_properties["sform"] = images[0].get_sform() assert len(image_properties["original_shape"]) in [2, 3], "images must be either 2D or 3D for preprocessing" # Check if header is valid and then attempt to orient to target orientation. 
if ( images[0].get_qform(coded=True)[1] or images[0].get_sform(coded=True)[1] and self.plans.get("target_coordinate_system") ): image_properties["reoriented"] = True original_orientation = get_nib_orientation(images[0]) image_properties["original_orientation"] = original_orientation images = [ reorient_nib_image(image, original_orientation, self.plans["target_coordinate_system"]) for image in images ] image_properties["new_orientation"] = get_nib_orientation(images[0]) else: print("Insufficient header information. Reorientation will not be attempted.") image_properties["reoriented"] = False image_properties["affine"] = images[0].affine images = [nifti_or_np_to_np(image) for image in images] image_properties["uncropped_shape"] = np.array(images[0].shape) if self.plans["crop_to_nonzero"]: nonzero_box = get_bbox_for_foreground(images[0], background_label=0) for i in range(len(images)): images[i] = crop_to_box(images[i], nonzero_box) image_properties["nonzero_box"] = nonzero_box image_properties["cropped_shape"] = np.array(images[0].shape) images = self._resample_and_normalize_case( images, norm_op=self.plans["normalization_scheme"], transpose=self.transpose_forward, original_spacing=image_properties["original_spacing"], target_spacing=self.target_spacing, ) # From this point images are shape (1, c, x, y, z) image_properties["resampled_transposed_shape"] = np.array(images[0].shape) for i in range(len(images)):
""" Takes raw data conforming with Yucca standards and preprocesses according to the generic scheme """ class YuccaMultiTaskPreprocessor(YuccaPreprocessor): """ Multi Task equivalent of the YuccaPreprocessor, which prepares a dataset consisting of a combination of segmentation, classification and registration cases. """ def __init__(self, plans_path, task=None, threads=12, disable_unittests=False): self.name = str(self.__class__.__name__) self.task = task self.plans_path = plans_path self.plans = load_json(plans_path) self.threads = threads self.disable_unittests = disable_unittests # lists for information we would like to attain self.transpose_forward = [] self.transpose_backward = [] self.target_spacing = [] def initialize_paths(self): self.target_dir = join(yucca_preprocessed_data, self.task, self.plans["plans_name"]) self.input_dir = join(yucca_raw_data, self.task) self.imagedirs = join(self.input_dir, "imagesTr") self.labeldirs = join(self.input_dir, "labelsTr") def initialize_properties(self): """ here we basically set up things that are needed for preprocessing during training, but that aren't necessary during inference """ self.dataset_properties = self.plans["dataset_properties"] self.intensities = self.dataset_properties["intensities"] # op values self.transpose_forward = np.array(self.plans["transpose_forward"]) self.transpose_backward = np.array(self.plans["transpose_backward"]) self.target_spacing = np.array(self.plans["target_spacing"]) def run(self): self.initialize_properties() self.initialize_paths() maybe_mkdir_p(self.target_dir) tasks = subdirs(join(self.input_dir, "imagesTr"), join=False) subject_ids = [] for task in tasks: for subject in subfiles(join(self.input_dir, "imagesTr", task), join=False): if subject.endswith("_000.nii.gz"): s = subject[: -len("_000.nii.gz")] subject_ids.append((s, task)) print( f"{'Preprocessing Task:':25.25} {self.task} \n" f"{'Using Planner:':25.25} {self.plans_path} \n" f"{'Crop to nonzero:':25.25} {self.plans['crop_to_nonzero']} \n" f"{'Normalization scheme:':25.25} {self.plans['normalization_scheme']} \n" f"{'Transpose Forward:':25.25} {self.transpose_forward} \n" f"{'Transpose Backward:':25.25} {self.transpose_backward} \n" ) p = Pool(self.threads) p.map(self._preprocess_train_subject, subject_ids) p.close() p.join() def _preprocess_train_subject(self, subject_id_and_task): subject_id, task = subject_id_and_task assert task in ["Classification", "Reconstruction", "Segmentation"] image_props = {} subject_id = subject_id.split(".")[0] print(f"Preprocessing: {subject_id}") arraypath = join(self.target_dir, subject_id + ".npy") picklepath = join(self.target_dir, subject_id + ".pkl") if isfile(arraypath) and isfile(picklepath): print(f"Case: {subject_id} already exists. 
Skipping.") return # First find relevant images by their paths and save them in the image property pickle # Then load them as images # The '_' in the end is to avoid treating Case_4_000 AND Case_42_000 as different versions # of the seg named Case_4 as both would start with "Case_4", however only the correct one is # followed by an underscore imagepaths = [ impath for impath in subfiles(join(self.imagedirs, task)) if os.path.split(impath)[-1].startswith(subject_id + "_") ] image_props["image files"] = imagepaths images = [nib.load(image) for image in imagepaths] # Do the same with segmentation seg = [ segpath for segpath in subfiles(join(self.labeldirs, task)) if os.path.split(segpath)[-1].startswith(subject_id + ".") ] print(subject_id, seg) image_props["segmentation file"] = seg assert len(seg) < 2, f"unexpected number of segmentations found. Expected 1 or 0 and found {len(seg)}" if task == "Classification": seg = np.load(seg[0]) elif task == "Segmentation": seg = nib.load(seg[0]) else: seg = None if not self.disable_unittests: assert len(images) > 0, f"found no images for {subject_id + '_'}, " f"attempted imagepaths: {imagepaths}" assert ( len(images[0].shape) == self.plans["dataset_properties"]["data_dimensions"] ), f"image should be shape (x, y(, z)) but is {images[0].shape}" # Make sure all modalities are correctly registered if len(images) > 1: for image in images: assert images[0].shape == image.shape, ( f"Sizes do not match for {subject_id}" f"One is: {images[0].shape} while another is {image.shape}" ) assert np.allclose(get_nib_spacing(images[0]), get_nib_spacing(image)), ( f"Spacings do not match for {subject_id}" f"One is: {get_nib_spacing(images[0])} while another is {get_nib_spacing(image)}" ) assert get_nib_orientation(images[0]) == get_nib_orientation(image), ( f"Directions do not match for {subject_id}" f"One is: {get_nib_orientation(images[0])} while another is {get_nib_orientation(image)}" ) original_spacing = get_nib_spacing(images[0]) original_size = np.array(images[0].shape) if self.target_spacing.size: target_spacing = self.target_spacing else: target_spacing = original_spacing # If qform and sform are both missing the header is corrupt and we do not trust the # direction from the affine # Make sure you know what you're doing if images[0].get_qform(coded=True)[1] or images[0].get_sform(coded=True)[1]: original_orientation = get_nib_orientation(images[0]) final_direction = self.plans["target_coordinate_system"] images = [nifti_or_np_to_np(reorient_nib_image(image, original_orientation, final_direction)) for image in images] if isinstance(seg, nib.Nifti1Image): seg = nifti_or_np_to_np(reorient_nib_image(seg, original_orientation, final_direction)) else: original_orientation = "INVALID" final_direction = "INVALID" images = [nifti_or_np_to_np(image) for image in images] if isinstance(seg, nib.Nifti1Image): seg = nifti_or_np_to_np(seg) # Cropping is performed to save computational resources. We are only removing background. 
if self.plans["crop_to_nonzero"]: nonzero_box = get_bbox_for_foreground(images[0], background_label=0) image_props["crop_to_nonzero"] = nonzero_box for i in range(len(images)): images[i] = crop_to_box(images[i], nonzero_box) if task == "Segmentation": seg = crop_to_box(seg, nonzero_box) else: image_props["crop_to_nonzero"] = self.plans["crop_to_nonzero"] if task != "Segmentation": images = self._resample_and_normalize_case( images, None, self.plans["normalization_scheme"], self.transpose_forward, original_spacing, target_spacing ) if seg is not None: images = np.array((np.array(images).T, seg), dtype="object") images[0] = images[0].T final_size = list(images[0][0].shape) else: images = np.array(images) final_size = list(images[0].shape) else: images, seg = self._resample_and_normalize_case( images, seg, self.plans["normalization_scheme"], self.transpose_forward, original_spacing, target_spacing ) # Stack and fix dimensions images = np.vstack((np.array(images), np.array(seg)[np.newaxis])) final_size = list(images[0].shape) # now AFTER transposition etc., we get some (no need to get all) # locations of foreground, that we will later use in the # oversampling of foreground classes if task == "Segmentation": foreground_locs = np.array(np.nonzero(images[-1])).T[::10] numbered_ground_truth, ground_truth_numb_lesion = cc3d.connected_components( images[-1], connectivity=26, return_N=True ) if ground_truth_numb_lesion == 0: object_sizes = 0 else: object_sizes = [ i * np.prod(target_spacing) for i in np.unique(numbered_ground_truth, return_counts=True)[-1][1:] ] else: foreground_locs = [] numbered_ground_truth = ground_truth_numb_lesion = object_sizes = 0 # save relevant values image_props["original_spacing"] = original_spacing image_props["original_size"] = original_size image_props["original_orientation"] = original_orientation image_props["new_spacing"] = target_spacing[self.transpose_forward].tolist() image_props["new_size"] = final_size image_props["task"] = task image_props["new_direction"] = final_direction image_props["foreground_locations"] = foreground_locs image_props["n_cc"] = ground_truth_numb_lesion image_props["size_cc"] = object_sizes print( f"size before: {original_size} size after: {image_props['new_size']} \n" f"spacing before: {original_spacing} spacing after: {image_props['new_spacing']} \n" f"Saving {subject_id} in {arraypath} \n" ) # save the image np.save(arraypath, images) # save metadata as .pkl save_pickle(image_props, picklepath) def _resample_and_normalize_case( self, images: list, seg: np.ndarray = None, norm_op=None, transpose=None, original_spacing=None, target_spacing=None ): # Normalize and Transpose images to target view. # Transpose segmentations to target view. assert len(images) == len(norm_op) == len(self.intensities), ( "number of images, " "normalization operations and intensities does not match. \n" f"len(images) == {len(images)} \n" f"len(norm_op) == {len(norm_op)} \n" f"len(self.intensities) == {len(self.intensities)} \n" ) for i in range(len(images)): images[i] = normalizer(images[i], scheme=norm_op[i], intensities=self.intensities[i]) assert len(images[i].shape) == len(transpose), ( "image and transpose axes do not match. 
\n" f"images[i].shape == {images[i].shape} \n" f"transpose == {transpose} \n" f"len(images[i].shape) == {len(images[i]).shape} \n" f"len(transpose) == {len(transpose)} \n" ) images[i] = images[i].transpose(transpose) print(f"Normalized with: {norm_op[0]} \n" f"Transposed with: {transpose}") shape_t = images[0].shape original_spacing_t = original_spacing[transpose] target_spacing_t = target_spacing[transpose] # Find new shape based on the target spacing target_shape = np.round((original_spacing_t / target_spacing_t).astype(float) * shape_t).astype(int) # Resample to target shape and spacing for i in range(len(images)): try: images[i] = resize(images[i], output_shape=target_shape, order=3) except OverflowError: print("Unexpected values in either shape or image for resize") if seg is not None: seg = seg.transpose(transpose) try: seg = resize(seg, output_shape=target_shape, order=0, anti_aliasing=False) except OverflowError: print("Unexpected values in either shape or seg for resize") return images, seg return images def preprocess_case_for_inference(self, images: list, patch_size: tuple, do_tta=False): """ Will reorient ONLY if we have valid qform or sform codes. with coded=True the methods will return {affine or None} and {0 or 1}. If both are 0 we cannot rely on headers for orientations and will instead assume images are in the desired orientation already. Afterwards images will be normalized and transposed as specified by the plans file also used in training. Finally images are resampled to the required spacing/size and returned as torch tensors of the required shape (b, c, x, y, (z)) """ assert isinstance(images, list), "image(s) should be a list, even if only one " "image is passed" self.initialize_properties() image_properties = {} images = [nib.load(image) for image in images] image_properties["original_spacing"] = get_nib_spacing(images[0]) image_properties["original_shape"] = np.array(images[0].shape) image_properties["qform"] = images[0].get_qform() image_properties["sform"] = images[0].get_sform() assert len(image_properties["original_shape"]) in [2, 3], "images must be either 2D or 3D for preprocessing" # Check if header is valid and then attempt to orient to target orientation. if ( images[0].get_qform(coded=True)[1] or images[0].get_sform(coded=True)[1] and self.plans.get("target_coordinate_system") ): image_properties["reoriented"] = True original_orientation = get_nib_orientation(images[0]) image_properties["original_orientation"] = original_orientation images = [ reorient_nib_image(image, original_orientation, self.plans["target_coordinate_system"]) for image in images ] image_properties["new_orientation"] = get_nib_orientation(images[0]) else: print("Insufficient header information. 
Reorientation will not be attempted.") image_properties["reoriented"] = False image_properties["affine"] = images[0].affine images = [nifti_or_np_to_np(image) for image in images] image_properties["uncropped_shape"] = np.array(images[0].shape) if self.plans["crop_to_nonzero"]: nonzero_box = get_bbox_for_foreground(images[0], background_label=0) for i in range(len(images)): images[i] = crop_to_box(images[i], nonzero_box) image_properties["nonzero_box"] = nonzero_box image_properties["cropped_shape"] = np.array(images[0].shape) images = self._resample_and_normalize_case( images, norm_op=self.plans["normalization_scheme"], transpose=self.transpose_forward, original_spacing=image_properties["original_spacing"], target_spacing=self.target_spacing, ) # From this point images are shape (1, c, x, y, z) image_properties["resampled_transposed_shape"] = np.array(images[0].shape) for i in range(len(images)):
images[i], padding = pad_to_size(images[i], patch_size)
9
2023-10-26 08:13:03+00:00
16k
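For this record the gold snippet is pad_to_size (index 9), and the ground-truth next_line pads each channel to the patch size while keeping the pad box so the padding can later be undone, as reverse_preprocessing does. A small self-contained sketch of that round trip in NumPy, using a hypothetical pad_to_size_2d helper and made-up shapes rather than the pipeline's own functions:

import numpy as np

def pad_to_size_2d(array, size):
    # Edge-pad `array` up to `size` and return the pad box so the padding can be reversed.
    pads = []
    for dim, target in zip(array.shape, size):
        total = max(target - dim, 0)
        pads.append((total // 2, total - total // 2))
    padded = np.pad(array, pads, mode="edge")
    pad_box = [p for pair in pads for p in pair]  # [before0, after0, before1, after1]
    return padded, pad_box

image = np.arange(12, dtype=np.float32).reshape(3, 4)
padded, pad_box = pad_to_size_2d(image, (8, 8))
assert padded.shape == (8, 8)

# Reversing the padding, mirroring what reverse_preprocessing does with image_properties["padding"].
h, w = padded.shape
restored = padded[pad_box[0] : h - pad_box[1], pad_box[2] : w - pad_box[3]]
assert np.array_equal(restored, image)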
Elfenreigen/UniChest
optim/optim_factory.py
[ { "identifier": "Adafactor", "path": "optim/adafactor.py", "snippet": "class Adafactor(torch.optim.Optimizer):\n \"\"\"Implements Adafactor algorithm.\n This implementation is based on: `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost`\n (see https://arxiv.org/abs/1804.04235)\n\n Note that this optimizer internally adjusts the learning rate depending on the\n *scale_parameter*, *relative_step* and *warmup_init* options.\n\n To use a manual (external) learning rate schedule you should set `scale_parameter=False` and\n `relative_step=False`.\n\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining parameter groups\n lr (float, optional): external learning rate (default: None)\n eps (tuple[float, float]): regularization constants for square gradient\n and parameter scale respectively (default: (1e-30, 1e-3))\n clip_threshold (float): threshold of root mean square of final gradient update (default: 1.0)\n decay_rate (float): coefficient used to compute running averages of square gradient (default: -0.8)\n beta1 (float): coefficient used for computing running averages of gradient (default: None)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n scale_parameter (bool): if True, learning rate is scaled by root mean square of parameter (default: True)\n relative_step (bool): if True, time-dependent learning rate is computed\n instead of external learning rate (default: True)\n warmup_init (bool): time-dependent learning rate computation depends on\n whether warm-up initialization is being used (default: False)\n \"\"\"\n\n def __init__(self, params, lr=None, eps=1e-30, eps_scale=1e-3, clip_threshold=1.0,\n decay_rate=-0.8, betas=None, weight_decay=0.0, scale_parameter=True, warmup_init=False):\n relative_step = lr is None\n if warmup_init and not relative_step:\n raise ValueError('warmup_init requires relative_step=True')\n\n beta1 = None if betas is None else betas[0] # make it compat with standard betas arg\n defaults = dict(lr=lr, eps=eps, eps_scale=eps_scale, clip_threshold=clip_threshold, decay_rate=decay_rate,\n beta1=beta1, weight_decay=weight_decay, scale_parameter=scale_parameter,\n relative_step=relative_step, warmup_init=warmup_init)\n super(Adafactor, self).__init__(params, defaults)\n\n @staticmethod\n def _get_lr(param_group, param_state):\n if param_group['relative_step']:\n min_step = 1e-6 * param_state['step'] if param_group['warmup_init'] else 1e-2\n lr_t = min(min_step, 1.0 / math.sqrt(param_state['step']))\n param_scale = 1.0\n if param_group['scale_parameter']:\n param_scale = max(param_group['eps_scale'], param_state['RMS'])\n param_group['lr'] = lr_t * param_scale\n return param_group['lr']\n\n @staticmethod\n def _get_options(param_group, param_shape):\n factored = len(param_shape) >= 2\n use_first_moment = param_group['beta1'] is not None\n return factored, use_first_moment\n\n @staticmethod\n def _rms(tensor):\n return tensor.norm(2) / (tensor.numel() ** 0.5)\n\n def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col):\n r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1)\n c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt()\n return torch.mul(r_factor, c_factor)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n Arguments:\n closure (callable, optional): A closure that reevaluates the model and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in 
group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.dtype in {torch.float16, torch.bfloat16}:\n grad = grad.float()\n if grad.is_sparse:\n raise RuntimeError('Adafactor does not support sparse gradients.')\n\n state = self.state[p]\n grad_shape = grad.shape\n\n factored, use_first_moment = self._get_options(group, grad_shape)\n # State Initialization\n if len(state) == 0:\n state['step'] = 0\n\n if use_first_moment:\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(grad)\n if factored:\n state['exp_avg_sq_row'] = torch.zeros(grad_shape[:-1]).to(grad)\n state['exp_avg_sq_col'] = torch.zeros(grad_shape[:-2] + grad_shape[-1:]).to(grad)\n else:\n state['exp_avg_sq'] = torch.zeros_like(grad)\n\n state['RMS'] = 0\n else:\n if use_first_moment:\n state['exp_avg'] = state['exp_avg'].to(grad)\n if factored:\n state['exp_avg_sq_row'] = state['exp_avg_sq_row'].to(grad)\n state['exp_avg_sq_col'] = state['exp_avg_sq_col'].to(grad)\n else:\n state['exp_avg_sq'] = state['exp_avg_sq'].to(grad)\n\n p_data_fp32 = p.data\n if p.data.dtype in {torch.float16, torch.bfloat16}:\n p_data_fp32 = p_data_fp32.float()\n\n state['step'] += 1\n state['RMS'] = self._rms(p_data_fp32)\n lr_t = self._get_lr(group, state)\n\n beta2t = 1.0 - math.pow(state['step'], group['decay_rate'])\n update = grad ** 2 + group['eps']\n if factored:\n exp_avg_sq_row = state['exp_avg_sq_row']\n exp_avg_sq_col = state['exp_avg_sq_col']\n\n exp_avg_sq_row.mul_(beta2t).add_(1.0 - beta2t, update.mean(dim=-1))\n exp_avg_sq_col.mul_(beta2t).add_(1.0 - beta2t, update.mean(dim=-2))\n #exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=1.0 - beta2t) # pytorch 1.6+\n #exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=1.0 - beta2t)\n\n # Approximation of exponential moving average of square of gradient\n update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col)\n update.mul_(grad)\n else:\n exp_avg_sq = state['exp_avg_sq']\n\n exp_avg_sq.mul_(beta2t).add_(1.0 - beta2t, update)\n #exp_avg_sq.mul_(beta2t).add_(update, alpha=1.0 - beta2t) # pytorch 1.6+\n update = exp_avg_sq.rsqrt().mul_(grad)\n\n update.div_((self._rms(update) / group['clip_threshold']).clamp_(min=1.0))\n update.mul_(lr_t)\n\n if use_first_moment:\n exp_avg = state['exp_avg']\n exp_avg.mul_(group[\"beta1\"]).add_(1 - group[\"beta1\"], update)\n #exp_avg.mul_(group['beta1']).add_(update, alpha=1 - group['beta1']) # pytorch 1.6+\n update = exp_avg\n\n if group['weight_decay'] != 0:\n p_data_fp32.add_(-group[\"weight_decay\"] * lr_t, p_data_fp32)\n #p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * lr_t) # pytorch 1.6+\n\n p_data_fp32.add_(-update)\n\n if p.data.dtype in {torch.float16, torch.bfloat16}:\n p.data.copy_(p_data_fp32)\n\n return loss" }, { "identifier": "Adahessian", "path": "optim/adahessian.py", "snippet": "class Adahessian(torch.optim.Optimizer):\n \"\"\"\n Implements the AdaHessian algorithm from \"ADAHESSIAN: An Adaptive Second OrderOptimizer for Machine Learning\"\n\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining parameter groups\n lr (float, optional): learning rate (default: 0.1)\n betas ((float, float), optional): coefficients used for computing running averages of gradient and the\n squared hessian trace (default: (0.9, 0.999))\n eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0.0)\n hessian_power (float, 
optional): exponent of the hessian trace (default: 1.0)\n update_each (int, optional): compute the hessian trace approximation only after *this* number of steps\n (to save time) (default: 1)\n n_samples (int, optional): how many times to sample `z` for the approximation of the hessian trace (default: 1)\n \"\"\"\n\n def __init__(self, params, lr=0.1, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0,\n hessian_power=1.0, update_each=1, n_samples=1, avg_conv_kernel=False):\n if not 0.0 <= lr:\n raise ValueError(f\"Invalid learning rate: {lr}\")\n if not 0.0 <= eps:\n raise ValueError(f\"Invalid epsilon value: {eps}\")\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(f\"Invalid beta parameter at index 0: {betas[0]}\")\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(f\"Invalid beta parameter at index 1: {betas[1]}\")\n if not 0.0 <= hessian_power <= 1.0:\n raise ValueError(f\"Invalid Hessian power value: {hessian_power}\")\n\n self.n_samples = n_samples\n self.update_each = update_each\n self.avg_conv_kernel = avg_conv_kernel\n\n # use a separate generator that deterministically generates the same `z`s across all GPUs in case of distributed training\n self.seed = 2147483647\n self.generator = torch.Generator().manual_seed(self.seed)\n\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, hessian_power=hessian_power)\n super(Adahessian, self).__init__(params, defaults)\n\n for p in self.get_params():\n p.hess = 0.0\n self.state[p][\"hessian step\"] = 0\n\n @property\n def is_second_order(self):\n return True\n\n def get_params(self):\n \"\"\"\n Gets all parameters in all param_groups with gradients\n \"\"\"\n\n return (p for group in self.param_groups for p in group['params'] if p.requires_grad)\n\n def zero_hessian(self):\n \"\"\"\n Zeros out the accumalated hessian traces.\n \"\"\"\n\n for p in self.get_params():\n if not isinstance(p.hess, float) and self.state[p][\"hessian step\"] % self.update_each == 0:\n p.hess.zero_()\n\n @torch.no_grad()\n def set_hessian(self):\n \"\"\"\n Computes the Hutchinson approximation of the hessian trace and accumulates it for each trainable parameter.\n \"\"\"\n\n params = []\n for p in filter(lambda p: p.grad is not None, self.get_params()):\n if self.state[p][\"hessian step\"] % self.update_each == 0: # compute the trace only each `update_each` step\n params.append(p)\n self.state[p][\"hessian step\"] += 1\n\n if len(params) == 0:\n return\n\n if self.generator.device != params[0].device: # hackish way of casting the generator to the right device\n self.generator = torch.Generator(params[0].device).manual_seed(self.seed)\n\n grads = [p.grad for p in params]\n\n for i in range(self.n_samples):\n # Rademacher distribution {-1.0, 1.0}\n zs = [torch.randint(0, 2, p.size(), generator=self.generator, device=p.device) * 2.0 - 1.0 for p in params]\n h_zs = torch.autograd.grad(\n grads, params, grad_outputs=zs, only_inputs=True, retain_graph=i < self.n_samples - 1)\n for h_z, z, p in zip(h_zs, zs, params):\n p.hess += h_z * z / self.n_samples # approximate the expected values of z*(H@z)\n\n @torch.no_grad()\n def step(self, closure=None):\n \"\"\"\n Performs a single optimization step.\n Arguments:\n closure (callable, optional) -- a closure that reevaluates the model and returns the loss (default: None)\n \"\"\"\n\n loss = None\n if closure is not None:\n loss = closure()\n\n self.zero_hessian()\n self.set_hessian()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None or p.hess is None:\n continue\n\n if 
self.avg_conv_kernel and p.dim() == 4:\n p.hess = torch.abs(p.hess).mean(dim=[2, 3], keepdim=True).expand_as(p.hess).clone()\n\n # Perform correct stepweight decay as in AdamW\n p.mul_(1 - group['lr'] * group['weight_decay'])\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 1:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(p)\n # Exponential moving average of Hessian diagonal square values\n state['exp_hessian_diag_sq'] = torch.zeros_like(p)\n\n exp_avg, exp_hessian_diag_sq = state['exp_avg'], state['exp_hessian_diag_sq']\n beta1, beta2 = group['betas']\n state['step'] += 1\n\n # Decay the first and second moment running average coefficient\n exp_avg.mul_(beta1).add_(p.grad, alpha=1 - beta1)\n exp_hessian_diag_sq.mul_(beta2).addcmul_(p.hess, p.hess, value=1 - beta2)\n\n bias_correction1 = 1 - beta1 ** state['step']\n bias_correction2 = 1 - beta2 ** state['step']\n\n k = group['hessian_power']\n denom = (exp_hessian_diag_sq / bias_correction2).pow_(k / 2).add_(group['eps'])\n\n # make update\n step_size = group['lr'] / bias_correction1\n p.addcdiv_(exp_avg, denom, value=-step_size)\n\n return loss" }, { "identifier": "AdamP", "path": "optim/adamp.py", "snippet": "class AdamP(Optimizer):\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,\n weight_decay=0, delta=0.1, wd_ratio=0.1, nesterov=False):\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,\n delta=delta, wd_ratio=wd_ratio, nesterov=nesterov)\n super(AdamP, self).__init__(params, defaults)\n\n def _channel_view(self, x):\n return x.view(x.size(0), -1)\n\n def _layer_view(self, x):\n return x.view(1, -1)\n\n def _cosine_similarity(self, x, y, eps, view_func):\n x = view_func(x)\n y = view_func(y)\n\n x_norm = x.norm(dim=1).add_(eps)\n y_norm = y.norm(dim=1).add_(eps)\n dot = (x * y).sum(dim=1)\n\n return dot.abs() / x_norm / y_norm\n\n def _projection(self, p, grad, perturb, delta, wd_ratio, eps):\n wd = 1\n expand_size = [-1] + [1] * (len(p.shape) - 1)\n for view_func in [self._channel_view, self._layer_view]:\n\n cosine_sim = self._cosine_similarity(grad, p.data, eps, view_func)\n\n if cosine_sim.max() < delta / math.sqrt(view_func(p.data).size(1)):\n p_n = p.data / view_func(p.data).norm(dim=1).view(expand_size).add_(eps)\n perturb -= p_n * view_func(p_n * perturb).sum(dim=1).view(expand_size)\n wd = wd_ratio\n\n return perturb, wd\n\n return perturb, wd\n\n def step(self, closure=None):\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n\n grad = p.grad.data\n beta1, beta2 = group['betas']\n nesterov = group['nesterov']\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n state['exp_avg'] = torch.zeros_like(p.data)\n state['exp_avg_sq'] = torch.zeros_like(p.data)\n\n # Adam\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n\n state['step'] += 1\n bias_correction1 = 1 - beta1 ** state['step']\n bias_correction2 = 1 - beta2 ** state['step']\n\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n\n denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])\n step_size = group['lr'] / bias_correction1\n\n if nesterov:\n perturb = (beta1 * exp_avg + (1 - beta1) * grad) / denom\n else:\n perturb = exp_avg / denom\n\n # Projection\n wd_ratio = 1\n if len(p.shape) > 1:\n perturb, wd_ratio = 
self._projection(p, grad, perturb, group['delta'], group['wd_ratio'], group['eps'])\n\n # Weight decay\n if group['weight_decay'] > 0:\n p.data.mul_(1 - group['lr'] * group['weight_decay'] * wd_ratio)\n\n # Step\n p.data.add_(-step_size, perturb)\n\n return loss" }, { "identifier": "Lookahead", "path": "optim/lookahead.py", "snippet": "class Lookahead(Optimizer):\n def __init__(self, base_optimizer, alpha=0.5, k=6):\n if not 0.0 <= alpha <= 1.0:\n raise ValueError(f'Invalid slow update rate: {alpha}')\n if not 1 <= k:\n raise ValueError(f'Invalid lookahead steps: {k}')\n defaults = dict(lookahead_alpha=alpha, lookahead_k=k, lookahead_step=0)\n self.base_optimizer = base_optimizer\n self.param_groups = self.base_optimizer.param_groups\n self.defaults = base_optimizer.defaults\n self.defaults.update(defaults)\n self.state = defaultdict(dict)\n # manually add our defaults to the param groups\n for name, default in defaults.items():\n for group in self.param_groups:\n group.setdefault(name, default)\n\n def update_slow(self, group):\n for fast_p in group[\"params\"]:\n if fast_p.grad is None:\n continue\n param_state = self.state[fast_p]\n if 'slow_buffer' not in param_state:\n param_state['slow_buffer'] = torch.empty_like(fast_p.data)\n param_state['slow_buffer'].copy_(fast_p.data)\n slow = param_state['slow_buffer']\n slow.add_(group['lookahead_alpha'], fast_p.data - slow)\n fast_p.data.copy_(slow)\n\n def sync_lookahead(self):\n for group in self.param_groups:\n self.update_slow(group)\n\n def step(self, closure=None):\n #assert id(self.param_groups) == id(self.base_optimizer.param_groups)\n loss = self.base_optimizer.step(closure)\n for group in self.param_groups:\n group['lookahead_step'] += 1\n if group['lookahead_step'] % group['lookahead_k'] == 0:\n self.update_slow(group)\n return loss\n\n def state_dict(self):\n fast_state_dict = self.base_optimizer.state_dict()\n slow_state = {\n (id(k) if isinstance(k, torch.Tensor) else k): v\n for k, v in self.state.items()\n }\n fast_state = fast_state_dict['state']\n param_groups = fast_state_dict['param_groups']\n return {\n 'state': fast_state,\n 'slow_state': slow_state,\n 'param_groups': param_groups,\n }\n\n def load_state_dict(self, state_dict):\n fast_state_dict = {\n 'state': state_dict['state'],\n 'param_groups': state_dict['param_groups'],\n }\n self.base_optimizer.load_state_dict(fast_state_dict)\n\n # We want to restore the slow state, but share param_groups reference\n # with base_optimizer. 
This is a bit redundant but least code\n slow_state_new = False\n if 'slow_state' not in state_dict:\n print('Loading state_dict from optimizer without Lookahead applied.')\n state_dict['slow_state'] = defaultdict(dict)\n slow_state_new = True\n slow_state_dict = {\n 'state': state_dict['slow_state'],\n 'param_groups': state_dict['param_groups'], # this is pointless but saves code\n }\n super(Lookahead, self).load_state_dict(slow_state_dict)\n self.param_groups = self.base_optimizer.param_groups # make both ref same container\n if slow_state_new:\n # reapply defaults to catch missing lookahead specific ones\n for name, default in self.defaults.items():\n for group in self.param_groups:\n group.setdefault(name, default)" }, { "identifier": "Nadam", "path": "optim/nadam.py", "snippet": "class Nadam(Optimizer):\n \"\"\"Implements Nadam algorithm (a variant of Adam based on Nesterov momentum).\n\n It has been proposed in `Incorporating Nesterov Momentum into Adam`__.\n\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 2e-3)\n betas (Tuple[float, float], optional): coefficients used for computing\n running averages of gradient and its square\n eps (float, optional): term added to the denominator to improve\n numerical stability (default: 1e-8)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n schedule_decay (float, optional): momentum schedule decay (default: 4e-3)\n\n __ http://cs229.stanford.edu/proj2015/054_report.pdf\n __ http://www.cs.toronto.edu/~fritz/absps/momentum.pdf\n\n Originally taken from: https://github.com/pytorch/pytorch/pull/1408\n NOTE: Has potential issues but does work well on some problems.\n \"\"\"\n\n def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8,\n weight_decay=0, schedule_decay=4e-3):\n defaults = dict(lr=lr, betas=betas, eps=eps,\n weight_decay=weight_decay, schedule_decay=schedule_decay)\n super(Nadam, self).__init__(params, defaults)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n state['m_schedule'] = 1.\n state['exp_avg'] = grad.new().resize_as_(grad).zero_()\n state['exp_avg_sq'] = grad.new().resize_as_(grad).zero_()\n\n # Warming momentum schedule\n m_schedule = state['m_schedule']\n schedule_decay = group['schedule_decay']\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n eps = group['eps']\n state['step'] += 1\n t = state['step']\n\n if group['weight_decay'] != 0:\n grad = grad.add(group['weight_decay'], p.data)\n\n momentum_cache_t = beta1 * \\\n (1. - 0.5 * (0.96 ** (t * schedule_decay)))\n momentum_cache_t_1 = beta1 * \\\n (1. - 0.5 * (0.96 ** ((t + 1) * schedule_decay)))\n m_schedule_new = m_schedule * momentum_cache_t\n m_schedule_next = m_schedule * momentum_cache_t * momentum_cache_t_1\n state['m_schedule'] = m_schedule_new\n\n # Decay the first and second moment running average coefficient\n exp_avg.mul_(beta1).add_(1. - beta1, grad)\n exp_avg_sq.mul_(beta2).addcmul_(1. - beta2, grad, grad)\n exp_avg_sq_prime = exp_avg_sq / (1. 
- beta2 ** t)\n denom = exp_avg_sq_prime.sqrt_().add_(eps)\n\n p.data.addcdiv_(-group['lr'] * (1. - momentum_cache_t) / (1. - m_schedule_new), grad, denom)\n p.data.addcdiv_(-group['lr'] * momentum_cache_t_1 / (1. - m_schedule_next), exp_avg, denom)\n\n return loss" }, { "identifier": "NovoGrad", "path": "optim/novograd.py", "snippet": "class NovoGrad(Optimizer):\n def __init__(self, params, grad_averaging=False, lr=0.1, betas=(0.95, 0.98), eps=1e-8, weight_decay=0):\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)\n super(NovoGrad, self).__init__(params, defaults)\n self._lr = lr\n self._beta1 = betas[0]\n self._beta2 = betas[1]\n self._eps = eps\n self._wd = weight_decay\n self._grad_averaging = grad_averaging\n\n self._momentum_initialized = False\n\n def step(self, closure=None):\n loss = None\n if closure is not None:\n loss = closure()\n\n if not self._momentum_initialized:\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n state = self.state[p]\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('NovoGrad does not support sparse gradients')\n\n v = torch.norm(grad)**2\n m = grad/(torch.sqrt(v) + self._eps) + self._wd * p.data\n state['step'] = 0\n state['v'] = v\n state['m'] = m\n state['grad_ema'] = None\n self._momentum_initialized = True\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n state = self.state[p]\n state['step'] += 1\n\n step, v, m = state['step'], state['v'], state['m']\n grad_ema = state['grad_ema']\n\n grad = p.grad.data\n g2 = torch.norm(grad)**2\n grad_ema = g2 if grad_ema is None else grad_ema * \\\n self._beta2 + g2 * (1. - self._beta2)\n grad *= 1.0 / (torch.sqrt(grad_ema) + self._eps)\n\n if self._grad_averaging:\n grad *= (1. - self._beta1)\n\n g2 = torch.norm(grad)**2\n v = self._beta2*v + (1. 
- self._beta2)*g2\n m = self._beta1*m + (grad / (torch.sqrt(v) + self._eps) + self._wd * p.data)\n bias_correction1 = 1 - self._beta1 ** step\n bias_correction2 = 1 - self._beta2 ** step\n step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1\n\n state['v'], state['m'] = v, m\n state['grad_ema'] = grad_ema\n p.data.add_(-step_size, m)\n return loss" }, { "identifier": "NvNovoGrad", "path": "optim/nvnovograd.py", "snippet": "class NvNovoGrad(Optimizer):\n \"\"\"\n Implements Novograd algorithm.\n\n Args:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 1e-3)\n betas (Tuple[float, float], optional): coefficients used for computing\n running averages of gradient and its square (default: (0.95, 0.98))\n eps (float, optional): term added to the denominator to improve\n numerical stability (default: 1e-8)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n grad_averaging: gradient averaging\n amsgrad (boolean, optional): whether to use the AMSGrad variant of this\n algorithm from the paper `On the Convergence of Adam and Beyond`_\n (default: False)\n \"\"\"\n\n def __init__(self, params, lr=1e-3, betas=(0.95, 0.98), eps=1e-8,\n weight_decay=0, grad_averaging=False, amsgrad=False):\n if not 0.0 <= lr:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 0: {}\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 1: {}\".format(betas[1]))\n defaults = dict(lr=lr, betas=betas, eps=eps,\n weight_decay=weight_decay,\n grad_averaging=grad_averaging,\n amsgrad=amsgrad)\n\n super(NvNovoGrad, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(NvNovoGrad, self).__setstate__(state)\n for group in self.param_groups:\n group.setdefault('amsgrad', False)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('Sparse gradients are not supported.')\n amsgrad = group['amsgrad']\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state['exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)\n if amsgrad:\n # Maintains max of all exp. moving avg. of sq. grad. values\n state['max_exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n if amsgrad:\n max_exp_avg_sq = state['max_exp_avg_sq']\n beta1, beta2 = group['betas']\n\n state['step'] += 1\n\n norm = torch.sum(torch.pow(grad, 2))\n\n if exp_avg_sq == 0:\n exp_avg_sq.copy_(norm)\n else:\n exp_avg_sq.mul_(beta2).add_(1 - beta2, norm)\n\n if amsgrad:\n # Maintains the maximum of all 2nd moment running avg. till now\n torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)\n # Use the max. for normalizing running avg. 
of gradient\n denom = max_exp_avg_sq.sqrt().add_(group['eps'])\n else:\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n\n grad.div_(denom)\n if group['weight_decay'] != 0:\n grad.add_(group['weight_decay'], p.data)\n if group['grad_averaging']:\n grad.mul_(1 - beta1)\n exp_avg.mul_(beta1).add_(grad)\n\n p.data.add_(-group['lr'], exp_avg)\n\n return loss" }, { "identifier": "RAdam", "path": "optim/radam.py", "snippet": "class RAdam(Optimizer):\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)\n self.buffer = [[None, None, None] for ind in range(10)]\n super(RAdam, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(RAdam, self).__setstate__(state)\n\n def step(self, closure=None):\n\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data.float()\n if grad.is_sparse:\n raise RuntimeError('RAdam does not support sparse gradients')\n\n p_data_fp32 = p.data.float()\n\n state = self.state[p]\n\n if len(state) == 0:\n state['step'] = 0\n state['exp_avg'] = torch.zeros_like(p_data_fp32)\n state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)\n else:\n state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)\n state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n\n state['step'] += 1\n buffered = self.buffer[int(state['step'] % 10)]\n if state['step'] == buffered[0]:\n N_sma, step_size = buffered[1], buffered[2]\n else:\n buffered[0] = state['step']\n beta2_t = beta2 ** state['step']\n N_sma_max = 2 / (1 - beta2) - 1\n N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)\n buffered[1] = N_sma\n\n # more conservative since it's an approximated value\n if N_sma >= 5:\n step_size = group['lr'] * math.sqrt(\n (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (\n N_sma_max - 2)) / (1 - beta1 ** state['step'])\n else:\n step_size = group['lr'] / (1 - beta1 ** state['step'])\n buffered[2] = step_size\n\n if group['weight_decay'] != 0:\n p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)\n\n # more conservative since it's an approximated value\n if N_sma >= 5:\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n p_data_fp32.addcdiv_(-step_size, exp_avg, denom)\n else:\n p_data_fp32.add_(-step_size, exp_avg)\n\n p.data.copy_(p_data_fp32)\n\n return loss" }, { "identifier": "RMSpropTF", "path": "optim/rmsprop_tf.py", "snippet": "class RMSpropTF(Optimizer):\n \"\"\"Implements RMSprop algorithm (TensorFlow style epsilon)\n\n NOTE: This is a direct cut-and-paste of PyTorch RMSprop with eps applied before sqrt\n and a few other modifications to closer match Tensorflow for matching hyper-params.\n\n Noteworthy changes include:\n 1. Epsilon applied inside square-root\n 2. square_avg initialized to ones\n 3. LR scaling of update accumulated in momentum buffer\n\n Proposed by G. 
Hinton in his\n `course <http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_.\n\n The centered version first appears in `Generating Sequences\n With Recurrent Neural Networks <https://arxiv.org/pdf/1308.0850v5.pdf>`_.\n\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 1e-2)\n momentum (float, optional): momentum factor (default: 0)\n alpha (float, optional): smoothing (decay) constant (default: 0.9)\n eps (float, optional): term added to the denominator to improve\n numerical stability (default: 1e-10)\n centered (bool, optional) : if ``True``, compute the centered RMSProp,\n the gradient is normalized by an estimation of its variance\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n decoupled_decay (bool, optional): decoupled weight decay as per https://arxiv.org/abs/1711.05101\n lr_in_momentum (bool, optional): learning rate scaling is included in the momentum buffer\n update as per defaults in Tensorflow\n\n \"\"\"\n\n def __init__(self, params, lr=1e-2, alpha=0.9, eps=1e-10, weight_decay=0, momentum=0., centered=False,\n decoupled_decay=False, lr_in_momentum=True):\n if not 0.0 <= lr:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n if not 0.0 <= momentum:\n raise ValueError(\"Invalid momentum value: {}\".format(momentum))\n if not 0.0 <= weight_decay:\n raise ValueError(\"Invalid weight_decay value: {}\".format(weight_decay))\n if not 0.0 <= alpha:\n raise ValueError(\"Invalid alpha value: {}\".format(alpha))\n\n defaults = dict(lr=lr, momentum=momentum, alpha=alpha, eps=eps, centered=centered, weight_decay=weight_decay,\n decoupled_decay=decoupled_decay, lr_in_momentum=lr_in_momentum)\n super(RMSpropTF, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(RMSpropTF, self).__setstate__(state)\n for group in self.param_groups:\n group.setdefault('momentum', 0)\n group.setdefault('centered', False)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('RMSprop does not support sparse gradients')\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n state['square_avg'] = torch.ones_like(p.data) # PyTorch inits to zero\n if group['momentum'] > 0:\n state['momentum_buffer'] = torch.zeros_like(p.data)\n if group['centered']:\n state['grad_avg'] = torch.zeros_like(p.data)\n\n square_avg = state['square_avg']\n one_minus_alpha = 1. 
- group['alpha']\n\n state['step'] += 1\n\n if group['weight_decay'] != 0:\n if 'decoupled_decay' in group and group['decoupled_decay']:\n p.data.add_(-group['weight_decay'], p.data)\n else:\n grad = grad.add(group['weight_decay'], p.data)\n\n # Tensorflow order of ops for updating squared avg\n square_avg.add_(one_minus_alpha, grad.pow(2) - square_avg)\n # square_avg.mul_(alpha).addcmul_(1 - alpha, grad, grad) # PyTorch original\n\n if group['centered']:\n grad_avg = state['grad_avg']\n grad_avg.add_(one_minus_alpha, grad - grad_avg)\n # grad_avg.mul_(alpha).add_(1 - alpha, grad) # PyTorch original\n avg = square_avg.addcmul(-1, grad_avg, grad_avg).add(group['eps']).sqrt_() # eps moved in sqrt\n else:\n avg = square_avg.add(group['eps']).sqrt_() # eps moved in sqrt\n\n if group['momentum'] > 0:\n buf = state['momentum_buffer']\n # Tensorflow accumulates the LR scaling in the momentum buffer\n if 'lr_in_momentum' in group and group['lr_in_momentum']:\n buf.mul_(group['momentum']).addcdiv_(group['lr'], grad, avg)\n p.data.add_(-buf)\n else:\n # PyTorch scales the param update by LR\n buf.mul_(group['momentum']).addcdiv_(grad, avg)\n p.data.add_(-group['lr'], buf)\n else:\n p.data.addcdiv_(-group['lr'], grad, avg)\n\n return loss" }, { "identifier": "SGDP", "path": "optim/sgdp.py", "snippet": "class SGDP(Optimizer):\n def __init__(self, params, lr=required, momentum=0, dampening=0,\n weight_decay=0, nesterov=False, eps=1e-8, delta=0.1, wd_ratio=0.1):\n defaults = dict(lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay,\n nesterov=nesterov, eps=eps, delta=delta, wd_ratio=wd_ratio)\n super(SGDP, self).__init__(params, defaults)\n\n def _channel_view(self, x):\n return x.view(x.size(0), -1)\n\n def _layer_view(self, x):\n return x.view(1, -1)\n\n def _cosine_similarity(self, x, y, eps, view_func):\n x = view_func(x)\n y = view_func(y)\n\n x_norm = x.norm(dim=1).add_(eps)\n y_norm = y.norm(dim=1).add_(eps)\n dot = (x * y).sum(dim=1)\n\n return dot.abs() / x_norm / y_norm\n\n def _projection(self, p, grad, perturb, delta, wd_ratio, eps):\n wd = 1\n expand_size = [-1] + [1] * (len(p.shape) - 1)\n for view_func in [self._channel_view, self._layer_view]:\n\n cosine_sim = self._cosine_similarity(grad, p.data, eps, view_func)\n\n if cosine_sim.max() < delta / math.sqrt(view_func(p.data).size(1)):\n p_n = p.data / view_func(p.data).norm(dim=1).view(expand_size).add_(eps)\n perturb -= p_n * view_func(p_n * perturb).sum(dim=1).view(expand_size)\n wd = wd_ratio\n\n return perturb, wd\n\n return perturb, wd\n\n def step(self, closure=None):\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n weight_decay = group['weight_decay']\n momentum = group['momentum']\n dampening = group['dampening']\n nesterov = group['nesterov']\n\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['momentum'] = torch.zeros_like(p.data)\n\n # SGD\n buf = state['momentum']\n buf.mul_(momentum).add_(1 - dampening, grad)\n if nesterov:\n d_p = grad + momentum * buf\n else:\n d_p = buf\n\n # Projection\n wd_ratio = 1\n if len(p.shape) > 1:\n d_p, wd_ratio = self._projection(p, grad, d_p, group['delta'], group['wd_ratio'], group['eps'])\n\n # Weight decay\n if weight_decay != 0:\n p.data.mul_(1 - group['lr'] * group['weight_decay'] * wd_ratio / (1-momentum))\n\n # Step\n p.data.add_(-group['lr'], d_p)\n\n return loss" } ]
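The context snippets above bundle several standalone optimizer implementations (Nadam, NovoGrad, NvNovoGrad, RAdam, RMSpropTF, SGDP) together with the Lookahead wrapper whose load_state_dict appears at the top of this section. The following is a minimal usage sketch, not code from the repository: it assumes Lookahead's constructor accepts an already-constructed base optimizer (only its load_state_dict is shown here) and that the module paths mirror the snippet headers (optim/radam.py, optim/lookahead.py):

import torch
import torch.nn.functional as F

from optim.radam import RAdam          # path taken from the snippet header (optim/radam.py)
from optim.lookahead import Lookahead  # assumed to sit next to the other optim modules

model = torch.nn.Linear(10, 2)
base = RAdam(model.parameters(), lr=1e-3, betas=(0.9, 0.999), weight_decay=1e-4)
optimizer = Lookahead(base)            # assumption: the wrapper takes a constructed base optimizer

x = torch.randn(8, 10)
y = torch.randint(0, 2, (8,))
loss = F.cross_entropy(model(x), y)
loss.backward()
optimizer.step()
optimizer.zero_grad()

# Round-tripping the state dict exercises the load_state_dict shown above: a checkpoint
# saved without Lookahead applied gets its 'slow_state' re-created and the
# lookahead-specific defaults re-applied to every param group.
optimizer.load_state_dict(optimizer.state_dict())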
import torch from torch import optim as optim from .adafactor import Adafactor from .adahessian import Adahessian from .adamp import AdamP from .lookahead import Lookahead from .nadam import Nadam from .novograd import NovoGrad from .nvnovograd import NvNovoGrad from .radam import RAdam from .rmsprop_tf import RMSpropTF from .sgdp import SGDP from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD
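The import block above hoists the apex optimizer imports to module level, while the factory code further down only sets has_apex inside a bare try/except. A hedged reconstruction of how that guard presumably looks in the original file:

try:
    from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD
    has_apex = True
except ImportError:
    has_apex = False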
12597
""" Optimizer Factory w/ Custom Weight Decay Hacked together by / Copyright 2020 Ross Wightman """ try: has_apex = True except ImportError: has_apex = False def add_weight_decay(model, image_encoder,text_encoder, weight_decay=1e-5, skip_list=()): decay = [] no_decay = [] for name, param in model.named_parameters(): if not param.requires_grad: continue # frozen weights if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: no_decay.append(param) else: decay.append(param) for name, param in image_encoder.named_parameters(): if not param.requires_grad: continue # frozen weights if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: no_decay.append(param) else: decay.append(param) for name, param in text_encoder.named_parameters(): if not param.requires_grad: continue # frozen weights if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: no_decay.append(param) else: decay.append(param) return [ {'params': no_decay, 'weight_decay': 0.}, {'params': decay, 'weight_decay': weight_decay}] def create_optimizer(args, model, image_encoder,text_encoder, filter_bias_and_bn=True): opt_lower = args.opt.lower() weight_decay = args.weight_decay if weight_decay and filter_bias_and_bn: skip = {} if hasattr(model, 'no_weight_decay'): skip = model.no_weight_decay() parameters = add_weight_decay(model,image_encoder,text_encoder, weight_decay, skip) weight_decay = 0. else: parameters = [filter(lambda p: p.requires_grad, model.parameters()),filter(lambda p: p.requires_grad, image_encoder.parameters()),filter(lambda p: p.requires_grad, text_encoder.parameters())] #model.parameters() # print(parameters) if 'fused' in opt_lower: assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers' opt_args = dict(lr=args.lr, weight_decay=weight_decay) if hasattr(args, 'opt_eps') and args.opt_eps is not None: opt_args['eps'] = args.opt_eps if hasattr(args, 'opt_betas') and args.opt_betas is not None: opt_args['betas'] = args.opt_betas if hasattr(args, 'opt_args') and args.opt_args is not None: opt_args.update(args.opt_args) opt_split = opt_lower.split('_') opt_lower = opt_split[-1] if opt_lower == 'sgd' or opt_lower == 'nesterov': opt_args.pop('eps', None) optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args) elif opt_lower == 'momentum': opt_args.pop('eps', None) optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args) elif opt_lower == 'adam': optimizer = optim.Adam(parameters, **opt_args) elif opt_lower == 'adamw': optimizer = optim.AdamW(parameters, **opt_args) elif opt_lower == 'nadam': optimizer = Nadam(parameters, **opt_args) elif opt_lower == 'radam': optimizer = RAdam(parameters, **opt_args) elif opt_lower == 'adamp': optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args) elif opt_lower == 'sgdp': optimizer = SGDP(parameters, momentum=args.momentum, nesterov=True, **opt_args) elif opt_lower == 'adadelta': optimizer = optim.Adadelta(parameters, **opt_args) elif opt_lower == 'adafactor': if not args.lr: opt_args['lr'] = None optimizer = Adafactor(parameters, **opt_args) elif opt_lower == 'adahessian': optimizer = Adahessian(parameters, **opt_args) elif opt_lower == 'rmsprop': optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=args.momentum, **opt_args) elif opt_lower == 'rmsproptf': optimizer = RMSpropTF(parameters, alpha=0.9, momentum=args.momentum, **opt_args) elif opt_lower == 'novograd':
""" Optimizer Factory w/ Custom Weight Decay Hacked together by / Copyright 2020 Ross Wightman """ try: has_apex = True except ImportError: has_apex = False def add_weight_decay(model, image_encoder,text_encoder, weight_decay=1e-5, skip_list=()): decay = [] no_decay = [] for name, param in model.named_parameters(): if not param.requires_grad: continue # frozen weights if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: no_decay.append(param) else: decay.append(param) for name, param in image_encoder.named_parameters(): if not param.requires_grad: continue # frozen weights if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: no_decay.append(param) else: decay.append(param) for name, param in text_encoder.named_parameters(): if not param.requires_grad: continue # frozen weights if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: no_decay.append(param) else: decay.append(param) return [ {'params': no_decay, 'weight_decay': 0.}, {'params': decay, 'weight_decay': weight_decay}] def create_optimizer(args, model, image_encoder,text_encoder, filter_bias_and_bn=True): opt_lower = args.opt.lower() weight_decay = args.weight_decay if weight_decay and filter_bias_and_bn: skip = {} if hasattr(model, 'no_weight_decay'): skip = model.no_weight_decay() parameters = add_weight_decay(model,image_encoder,text_encoder, weight_decay, skip) weight_decay = 0. else: parameters = [filter(lambda p: p.requires_grad, model.parameters()),filter(lambda p: p.requires_grad, image_encoder.parameters()),filter(lambda p: p.requires_grad, text_encoder.parameters())] #model.parameters() # print(parameters) if 'fused' in opt_lower: assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers' opt_args = dict(lr=args.lr, weight_decay=weight_decay) if hasattr(args, 'opt_eps') and args.opt_eps is not None: opt_args['eps'] = args.opt_eps if hasattr(args, 'opt_betas') and args.opt_betas is not None: opt_args['betas'] = args.opt_betas if hasattr(args, 'opt_args') and args.opt_args is not None: opt_args.update(args.opt_args) opt_split = opt_lower.split('_') opt_lower = opt_split[-1] if opt_lower == 'sgd' or opt_lower == 'nesterov': opt_args.pop('eps', None) optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args) elif opt_lower == 'momentum': opt_args.pop('eps', None) optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args) elif opt_lower == 'adam': optimizer = optim.Adam(parameters, **opt_args) elif opt_lower == 'adamw': optimizer = optim.AdamW(parameters, **opt_args) elif opt_lower == 'nadam': optimizer = Nadam(parameters, **opt_args) elif opt_lower == 'radam': optimizer = RAdam(parameters, **opt_args) elif opt_lower == 'adamp': optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args) elif opt_lower == 'sgdp': optimizer = SGDP(parameters, momentum=args.momentum, nesterov=True, **opt_args) elif opt_lower == 'adadelta': optimizer = optim.Adadelta(parameters, **opt_args) elif opt_lower == 'adafactor': if not args.lr: opt_args['lr'] = None optimizer = Adafactor(parameters, **opt_args) elif opt_lower == 'adahessian': optimizer = Adahessian(parameters, **opt_args) elif opt_lower == 'rmsprop': optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=args.momentum, **opt_args) elif opt_lower == 'rmsproptf': optimizer = RMSpropTF(parameters, alpha=0.9, momentum=args.momentum, **opt_args) elif opt_lower == 'novograd':
optimizer = NovoGrad(parameters, **opt_args)
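The cropped_code above stops at the elif opt_lower == 'novograd': branch, and the next_line field holds its ground-truth completion. Below is a small sketch of driving the full factory to that branch; the module name optim_factory and the exact set of args attributes are assumptions inferred from what create_optimizer reads (opt, lr, weight_decay, momentum, opt_eps, opt_betas, opt_args):

import argparse

import torch

from optim_factory import create_optimizer  # module name is a guess; the factory code is shown above

args = argparse.Namespace(
    opt="novograd", lr=1e-3, weight_decay=1e-2, momentum=0.9,
    opt_eps=1e-8, opt_betas=(0.95, 0.98), opt_args=None,
)
model = torch.nn.Linear(16, 4)
image_encoder = torch.nn.Linear(16, 4)
text_encoder = torch.nn.Linear(16, 4)

# With opt="novograd" the factory reaches the branch completed by the next_line above,
# i.e. optimizer = NovoGrad(parameters, **opt_args).
optimizer = create_optimizer(args, model, image_encoder, text_encoder)
print(type(optimizer).__name__)  # expected: NovoGrad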
5
2023-10-30 00:24:16+00:00
16k
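This closes one flattened record of the next-line completion dataset (repo_name through level). A sketch of reading such a record back and pairing the prompt with its target, assuming the records are serialized as JSON Lines with the field names shown in the header; the file name is a placeholder:

import json

with open("completion_records.jsonl", encoding="utf-8") as f:  # placeholder file name
    record = json.loads(next(f))

print(record["repo_name"], record["file_path"], record["level"])
print("context snippets:", len(record["context"]))

# For the record above, gold_snippet_index (5) appears to point at the context entry
# whose identifier shows up in the ground-truth continuation; this is an observation,
# not a documented guarantee.
gold = record["context"][record["gold_snippet_index"]]
print(gold["identifier"], "->", record["next_line"].strip())

# cropped_code ends exactly where next_line is meant to be appended.
print(record["cropped_code"].splitlines()[-1])
print(record["next_line"])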
YichenZW/Coh-MGT-Detection
run_detector.py
[ { "identifier": "glue_compute_metrics", "path": "util.py", "snippet": "def glue_compute_metrics(task_name, preds, labels):\n assert len(preds) == len(labels)\n if task_name == \"cola\":\n return {\"mcc\": matthews_corrcoef(labels, preds)}\n elif task_name == \"sst-2\":\n return {\"acc\": simple_accuracy(preds, labels)}\n elif task_name == \"mrpc\" or task_name == \"deepfake\":\n return acc_and_f1(preds, labels)\n elif task_name == \"sts-b\":\n return pearson_and_spearman(preds, labels)\n elif task_name == \"qqp\":\n return acc_and_f1(preds, labels)\n elif task_name == \"mnli\":\n return {\"acc\": simple_accuracy(preds, labels)}\n elif task_name == \"mnli-mm\":\n return {\"acc\": simple_accuracy(preds, labels)}\n elif task_name == \"qnli\":\n return {\"acc\": simple_accuracy(preds, labels)}\n elif task_name == \"rte\":\n return {\"acc\": simple_accuracy(preds, labels)}\n elif task_name == \"wnli\":\n return {\"acc\": simple_accuracy(preds, labels)}\n elif task_name == \"hans\":\n return {\"acc\": simple_accuracy(preds, labels)}\n else:\n raise KeyError(task_name)" }, { "identifier": "glue_convert_examples_to_features", "path": "util.py", "snippet": "def glue_convert_examples_to_features(\n examples,\n tokenizer,\n max_length=512,\n task=None,\n label_list=None,\n output_mode=None,\n pad_on_left=False,\n pad_token=0,\n pad_token_segment_id=0,\n mask_padding_with_zero=True,\n):\n \"\"\"\n Loads a data file into a list of ``InputFeatures``\n Args:\n examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples.\n tokenizer: Instance of a tokenizer that will tokenize the examples\n max_length: Maximum example length\n task: GLUE task\n label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method\n output_mode: String indicating the output mode. Either ``regression`` or ``classification``\n pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)\n pad_token: Padding token\n pad_token_segment_id: The segment ID for the padding token (It is usually 0, but can vary such as for XLNet where it is 4)\n mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values\n and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for\n actual values)\n Returns:\n If the ``examples`` input is a ``tf.data.Dataset``, will return a ``tf.data.Dataset``\n containing the task-specific features. If the input is a list of ``InputExamples``, will return\n a list of task-specific ``InputFeatures`` which can be fed to the model.\n \"\"\"\n\n if task is not None:\n processor = glue_processors[task]()\n if label_list is None:\n label_list = processor.get_labels()\n logger.info(\"Using label list %s for task %s\" % (label_list, task))\n if output_mode is None:\n output_mode = glue_output_modes[task]\n logger.info(\"Using output mode %s for task %s\" % (output_mode, task))\n\n label_map = {label: i for i, label in enumerate(label_list)}\n\n features = []\n for ex_index, example in enumerate(examples):\n len_examples = 0\n\n len_examples = len(examples)\n if ex_index % 10000 == 0:\n logger.info(\"Writing example %d/%d\" % (ex_index, len_examples))\n\n inputs = tokenizer.encode_plus(\n example.text_a,\n add_special_tokens=True,\n max_length=max_length,\n return_token_type_ids=True,\n )\n input_ids, token_type_ids = inputs[\"input_ids\"], inputs[\"token_type_ids\"]\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # Tokens are attended to.\n attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding_length = max_length - len(input_ids)\n if pad_on_left:\n input_ids = ([pad_token] * padding_length) + input_ids\n attention_mask = (\n [0 if mask_padding_with_zero else 1] * padding_length\n ) + attention_mask\n token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids\n else:\n input_ids = input_ids + ([pad_token] * padding_length)\n attention_mask = attention_mask + (\n [0 if mask_padding_with_zero else 1] * padding_length\n )\n token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)\n\n assert len(input_ids) == max_length, \"Error with input length {} vs {}\".format(\n len(input_ids), max_length\n )\n assert (\n len(attention_mask) == max_length\n ), \"Error with input length {} vs {}\".format(len(attention_mask), max_length)\n assert (\n len(token_type_ids) == max_length\n ), \"Error with input length {} vs {}\".format(len(token_type_ids), max_length)\n\n if output_mode == \"classification\":\n label = label_map[example.label]\n elif output_mode == \"regression\":\n label = float(example.label)\n else:\n raise KeyError(output_mode)\n\n if ex_index < 5:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % (example.guid))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\n \"attention_mask: %s\" % \" \".join([str(x) for x in attention_mask])\n )\n logger.info(\n \"token_type_ids: %s\" % \" \".join([str(x) for x in token_type_ids])\n )\n logger.info(\"label: %s (id = %d)\" % (example.label, label))\n\n features.append(\n InputFeatures(\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n label=label,\n nodes_index=example.nodes_index,\n adj_metric=example.adj_metric,\n sen2node=example.sen2node,\n nodes_ent=example.nodes_ent,\n )\n )\n\n return features" }, { "identifier": "glue_output_modes", "path": "util.py", "snippet": "class InputExample(object):\nclass InputFeatures(object):\nclass DeepFakeProcessor(DataProcessor):\n def __init__(\n self,\n guid,\n text_a,\n text_b=None,\n label=None,\n nodes_index=None,\n adj_metric=None,\n all_tokens=None,\n sen2node=None,\n nodes_ent=None,\n ):\n def __repr__(self):\n def to_dict(self):\n def to_json_string(self):\n def __init__(\n self,\n input_ids,\n attention_mask=None,\n token_type_ids=None,\n label=None,\n nodes_index=None,\n adj_metric=None,\n sen2node=None,\n nodes_ent=None,\n ):\n def __repr__(self):\n def to_dict(self):\n def to_json_string(self):\ndef glue_convert_examples_to_features(\n examples,\n tokenizer,\n max_length=512,\n task=None,\n label_list=None,\n output_mode=None,\n pad_on_left=False,\n pad_token=0,\n pad_token_segment_id=0,\n mask_padding_with_zero=True,\n):\n def get_example_from_tensor_dict(self, tensor_dict):\n def _read_jsonl(self, path):\n def get_train_examples(\n self, with_relation, data_dir, train_file=\"gpt2_500_train_Graph.jsonl\"\n ):\n def get_dev_examples(\n self, with_relation, data_dir, dev_file=\"gpt2_dev_Graph.jsonl\"\n ):\n def get_test_examples(\n self, with_relation, data_dir, test_file=\"gpt2_test_Graph.jsonl\"\n ):\n def get_labels(self):\n def _get_nodes(self, nodes):\n def _get_adj_metric(self, edges, drop_nodes, node_num, with_relation):\n def clean_string(self, string):\n def _create_examples(self, with_relation, inputs, set_type):\ndef simple_accuracy(preds, labels):\ndef acc_and_f1(preds, labels):\ndef 
pearson_and_spearman(preds, labels):\ndef glue_compute_metrics(task_name, preds, labels):\ndef xnli_compute_metrics(task_name, preds, labels):" }, { "identifier": "glue_processors", "path": "util.py", "snippet": "class InputExample(object):\nclass InputFeatures(object):\nclass DeepFakeProcessor(DataProcessor):\n def __init__(\n self,\n guid,\n text_a,\n text_b=None,\n label=None,\n nodes_index=None,\n adj_metric=None,\n all_tokens=None,\n sen2node=None,\n nodes_ent=None,\n ):\n def __repr__(self):\n def to_dict(self):\n def to_json_string(self):\n def __init__(\n self,\n input_ids,\n attention_mask=None,\n token_type_ids=None,\n label=None,\n nodes_index=None,\n adj_metric=None,\n sen2node=None,\n nodes_ent=None,\n ):\n def __repr__(self):\n def to_dict(self):\n def to_json_string(self):\ndef glue_convert_examples_to_features(\n examples,\n tokenizer,\n max_length=512,\n task=None,\n label_list=None,\n output_mode=None,\n pad_on_left=False,\n pad_token=0,\n pad_token_segment_id=0,\n mask_padding_with_zero=True,\n):\n def get_example_from_tensor_dict(self, tensor_dict):\n def _read_jsonl(self, path):\n def get_train_examples(\n self, with_relation, data_dir, train_file=\"gpt2_500_train_Graph.jsonl\"\n ):\n def get_dev_examples(\n self, with_relation, data_dir, dev_file=\"gpt2_dev_Graph.jsonl\"\n ):\n def get_test_examples(\n self, with_relation, data_dir, test_file=\"gpt2_test_Graph.jsonl\"\n ):\n def get_labels(self):\n def _get_nodes(self, nodes):\n def _get_adj_metric(self, edges, drop_nodes, node_num, with_relation):\n def clean_string(self, string):\n def _create_examples(self, with_relation, inputs, set_type):\ndef simple_accuracy(preds, labels):\ndef acc_and_f1(preds, labels):\ndef pearson_and_spearman(preds, labels):\ndef glue_compute_metrics(task_name, preds, labels):\ndef xnli_compute_metrics(task_name, preds, labels):" }, { "identifier": "RobertaForGraphBasedSequenceClassification", "path": "modeling_roberta.py", "snippet": "class RobertaForGraphBasedSequenceClassification(\n BertPreTrainedModel\n): \n def __init__(self, config):\n config.output_hidden_states = True\n config.output_attentions = True\n\n super(RobertaForGraphBasedSequenceClassification, self).__init__(config)\n self.num_labels = config.num_labels\n self.classifier = RobertaClassificationHead(config, graph_node_size=None)\n self.graph_aggregation = GCNGraphAgg(\n config.hidden_size, self.node_size, self.max_sentence_size\n )\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n nodes_index_mask=None,\n adj_metric=None,\n node_mask=None,\n sen2node=None,\n sentence_mask=None,\n sentence_length=None,\n ):\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n )\n sequence_output = outputs[0][:, 0, :]\n \n hidden_states = outputs[2][0]\n\n graph_rep = self.graph_aggregation(\n hidden_states,\n nodes_index_mask,\n adj_metric,\n node_mask,\n sen2node,\n sentence_mask,\n sentence_length,\n )\n whole_rep = torch.cat([sequence_output, graph_rep], dim=-1)\n\n logits = self.classifier(whole_rep, dim=-1)\n\n outputs = (logits,) + outputs[2:]\n if labels is not None:\n if self.num_labels == 1:\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n 
outputs = (loss,) + outputs\n\n return outputs, whole_rep " }, { "identifier": "RobertaForGraphBasedSequenceClassification_CL", "path": "modeling_roberta.py", "snippet": "class RobertaForGraphBasedSequenceClassification_CL(BertPreTrainedModel):\n def __init__(self, config):\n config.output_hidden_states = True\n config.output_attentions = True\n\n super(RobertaForGraphBasedSequenceClassification_CL, self).__init__(config)\n self.temperature = 0.2\n self.num_labels = config.num_labels\n self.gcn_layer = config.task_specific_params[\"gcn_layer\"]\n self.max_node_num = config.task_specific_params[\"max_nodes_num\"]\n self.max_sentences = config.task_specific_params[\"max_sentences\"]\n self.max_sen_replen = config.task_specific_params[\"max_sen_replen\"]\n self.attention_maxscore = config.task_specific_params[\"attention_maxscore\"]\n self.relation_num = config.task_specific_params[\"relation_num\"]\n\n self.roberta = RobertaModel(config)\n self.classifier = RobertaClassificationHead(\n config, graph_node_size=self.max_sen_replen\n )\n self.graph_aggregation = GCNGraphAgg(\n config.hidden_size,\n self.max_sentences,\n self.gcn_layer,\n self.max_sen_replen,\n self.attention_maxscore,\n self.relation_num,\n )\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n nodes_index_mask=None,\n adj_metric=None,\n node_mask=None,\n sen2node=None,\n sentence_mask=None,\n sentence_length=None,\n batch_id=None,\n ):\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n )\n sequence_output = outputs[0][:, 0, :]\n hidden_states = outputs[2][0]\n\n graph_rep = self.graph_aggregation(\n hidden_states,\n nodes_index_mask,\n adj_metric,\n node_mask,\n sen2node,\n sentence_mask,\n sentence_length,\n )\n whole_rep = torch.cat([sequence_output, graph_rep], dim=-1)\n\n logits = self.classifier(torch.cat([sequence_output, graph_rep], dim=-1))\n\n outputs = (logits,) + outputs[2:]\n if labels is not None:\n if self.num_labels == 1:\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n batch_size = len(labels)\n batch_idx_by_label = {}\n for i in range(2):\n batch_idx_by_label[i] = [\n idx\n for idx in range(batch_size)\n if int(labels.view(-1)[idx]) == i\n ] \n\n contraloss = self.contrastive_loss_labelwise_winslide(\n batch_size, batch_idx_by_label, whole_rep\n )\n\n loss_fct = CrossEntropyLoss()\n ce_loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n contraloss_weight = 0.6\n loss = (\n 1.0 - contraloss_weight\n ) * ce_loss + contraloss_weight * contraloss\n outputs = (loss,) + outputs\n return outputs, whole_rep \n\n def get_key(self, dic, value):\n return [k for k, v in dic.items() if value in v]\n\n def contrastive_loss_labelwise_winslide(\n self, batch_size, batch_idx_by_label, hidden_feats\n ):\n \"\"\"\n Hidden feats must be normalized\n\n \"\"\"\n hidden_feats = F.normalize(hidden_feats, dim=1)\n sim_matrix = torch.mm(hidden_feats, hidden_feats.T) \n loss = 0.0\n\n for i in range(batch_size):\n label_list = self.get_key(batch_idx_by_label, i)\n label = label_list[0]\n one_same_label = (\n torch.zeros((batch_size,))\n .to(sim_matrix.device)\n .scatter_(\n 0,\n torch.tensor(batch_idx_by_label[label]).to(sim_matrix.device),\n 1.0,\n )\n )\n one_diff_label = (\n torch.ones((batch_size,))\n .to(sim_matrix.device)\n 
.scatter_(\n 0,\n torch.tensor(batch_idx_by_label[label]).to(sim_matrix.device),\n 0.0,\n )\n )\n one_for_not_i = (\n torch.ones((batch_size,))\n .to(sim_matrix.device)\n .scatter_(0, torch.tensor([i]).to(sim_matrix.device), 0.0)\n ) \n one_for_numerator = one_same_label.mul(one_for_not_i)\n\n numerator = torch.sum(\n one_for_numerator * torch.exp(sim_matrix[i, :] / self.temperature)\n )\n denominator = torch.sum(\n one_for_not_i * torch.exp(sim_matrix[i, :] / self.temperature)\n )\n\n if numerator == 0:\n numerator += 1e-6\n if denominator == 0:\n denominator += 1e-6\n\n loss += -torch.log(numerator / denominator)\n\n return loss / batch_size" }, { "identifier": "RobertaForGraphBasedSequenceClassification_MBCL", "path": "modeling_roberta.py", "snippet": "class RobertaForGraphBasedSequenceClassification_MBCL(BertPreTrainedModel):\n def __init__(self, config, mb_dataloader, train_idx_by_label):\n config.output_hidden_states = True\n config.output_attentions = True\n\n super(RobertaForGraphBasedSequenceClassification_MBCL, self).__init__(config)\n self.temperature = 0.2\n self.num_labels = config.num_labels\n self.gcn_layer = config.task_specific_params[\"gcn_layer\"]\n self.max_node_num = config.task_specific_params[\"max_nodes_num\"]\n self.max_sentences = config.task_specific_params[\"max_sentences\"]\n self.max_sen_replen = config.task_specific_params[\"max_sen_replen\"]\n self.attention_maxscore = config.task_specific_params[\"attention_maxscore\"]\n self.relation_num = config.task_specific_params[\"relation_num\"]\n self.train_idx_by_label = train_idx_by_label\n self.classifier = RobertaClassificationHead(\n config, graph_node_size=self.max_sen_replen\n )\n self.model_q = EncoderForMBCL(config)\n self.model_k = EncoderForMBCL(config)\n for param_q, param_k in zip(\n self.model_q.parameters(), self.model_k.parameters()\n ):\n param_k.data.copy_(param_q.data) \n self.model_q.cuda()\n self.model_k.cuda()\n with torch.no_grad():\n for k, item in enumerate(mb_dataloader):\n input_ids = item[0].cuda()\n attention_mask = item[1].cuda()\n labels = item[3].cuda()\n nodes_index_mask = item[4].cuda()\n adj_metric = item[5].cuda()\n node_mask = item[6].cuda()\n sen2node = item[7].cuda()\n sentence_mask = item[8].cuda()\n sentence_length = item[9].cuda()\n\n output = self.model_q(\n input_ids=input_ids,\n attention_mask=attention_mask,\n labels=labels,\n nodes_index_mask=nodes_index_mask,\n adj_metric=adj_metric,\n node_mask=node_mask,\n sen2node=sen2node,\n sentence_mask=sentence_mask,\n sentence_length=sentence_length,\n )\n init_feat = F.normalize(output[1], dim=1)\n if k == 0:\n self.queue = init_feat\n else:\n self.queue = torch.vstack((self.queue, init_feat))\n\n print(self.queue.size())\n print(\"***queue already builded***\")\n\n self.config = self.model_q.config\n self.feat_dim = self.config.hidden_size\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n nodes_index_mask=None,\n adj_metric=None,\n node_mask=None,\n sen2node=None,\n sentence_mask=None,\n sentence_length=None,\n batch_id=None,\n ):\n if self.training:\n batch_size = int(input_ids.size(0))\n output_q = self.model_q(\n input_ids,\n attention_mask,\n token_type_ids,\n position_ids,\n head_mask,\n inputs_embeds,\n labels,\n nodes_index_mask,\n adj_metric,\n node_mask,\n sen2node,\n sentence_mask,\n sentence_length,\n batch_id,\n ) \n q_feat = output_q[1]\n logits = self.classifier(output_q[1])\n outputs = (logits,) 
+ output_q[0]\n loss_fct = CrossEntropyLoss()\n q_ce_loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n output_k = self.model_k(\n input_ids,\n attention_mask,\n token_type_ids,\n position_ids,\n head_mask,\n inputs_embeds,\n labels,\n nodes_index_mask,\n adj_metric,\n node_mask,\n sen2node,\n sentence_mask,\n sentence_length,\n batch_id,\n )\n k_feat = output_k[1]\n self.dequeue_and_enqueue(k_feat, batch_id)\n batch_idx_by_label = {}\n for i in range(2):\n batch_idx_by_label[i] = [\n idx for idx in range(batch_size) if labels[idx] == i\n ] \n contraloss = self.contrastive_loss_es(\n batch_size, batch_idx_by_label, q_feat\n )\n self.momentum_update(m=0.999)\n contraloss_weight = 0.6\n loss = (\n 1.0 - contraloss_weight\n ) * q_ce_loss + contraloss_weight * contraloss\n\n outputs = (loss,) + outputs\n\n return outputs, output_q[1] \n else:\n batch_size = int(input_ids.size(0))\n output_q = self.model_q(\n input_ids,\n attention_mask,\n token_type_ids,\n position_ids,\n head_mask,\n inputs_embeds,\n labels,\n nodes_index_mask,\n adj_metric,\n node_mask,\n sen2node,\n sentence_mask,\n sentence_length,\n batch_id,\n ) \n q_feat = output_q[1]\n logits = self.classifier(output_q[1])\n outputs = (logits,) + output_q[0]\n loss_fct = CrossEntropyLoss()\n q_ce_loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n batch_idx_by_label = {}\n for i in range(2):\n batch_idx_by_label[i] = [\n idx for idx in range(batch_size) if labels[idx] == i\n ] \n contraloss = self.contrastive_loss_es(\n batch_size, batch_idx_by_label, q_feat\n )\n contraloss_weight = 0.6\n loss = (\n 1.0 - contraloss_weight\n ) * q_ce_loss + contraloss_weight * contraloss\n\n outputs = (loss,) + outputs\n\n return outputs, output_q[1] \n\n def get_key(self, dic, value):\n return [k for k, v in dic.items() if value in v]\n\n def contrastive_loss_es(self, batch_size, batch_idx_by_label, hidden_feats):\n hidden_feats = F.normalize(hidden_feats, dim=1)\n change_dic = {0: 1, 1: 0}\n loss = 0\n\n for i in batch_idx_by_label:\n q = hidden_feats[batch_idx_by_label[i]]\n pos_bank = self.queue[self.train_idx_by_label[i]]\n pos_pair = torch.mm(q, pos_bank.transpose(0, 1))\n bottom_k = torch.topk(pos_pair, k=100, dim=1, largest=False).values\n neg_bank = self.queue[self.train_idx_by_label[change_dic[i]]]\n neg_pair = torch.mm(q, neg_bank.transpose(0, 1))\n top_k = torch.topk(neg_pair, k=100, dim=1).values\n numerator = torch.sum(torch.exp(bottom_k / self.temperature), dim=1)\n denominator = (\n torch.sum(torch.exp(top_k / self.temperature), dim=1) + numerator\n )\n\n for nid in range(len(numerator)):\n if numerator[nid] == 0:\n numerator[nid] += 1e-6\n for did in range(len(denominator)):\n if denominator[did] == 0:\n denominator[did] += 1e-6\n loss += torch.sum(-1.0 * torch.log(numerator / denominator))\n\n return loss / batch_size\n\n @torch.no_grad()\n def momentum_update(self, m=0.999):\n \"\"\"\n encoder_k = m * encoder_k + (1 - m) encoder_q\n \"\"\"\n for param_q, param_k in zip(\n self.model_q.parameters(), self.model_k.parameters()\n ):\n param_k.data = param_k.data * m + param_q.data * (1.0 - m)\n\n def dequeue_and_enqueue(self, hidden_batch_feats, selected_batch_idx):\n \"\"\"\n Update memory bank by batch window slide; hidden_batch_feats must be normalized\n \"\"\"\n assert hidden_batch_feats.size()[1] == self.queue.size()[1]\n\n self.queue[selected_batch_idx] = F.normalize(hidden_batch_feats, dim=1)" }, { "identifier": "EncoderForMBCL", "path": "modeling_roberta.py", "snippet": "class 
EncoderForMBCL(BertPreTrainedModel):\n def __init__(self, config):\n super(EncoderForMBCL, self).__init__(config)\n self.max_sen_replen = config.task_specific_params[\"max_sen_replen\"]\n self.max_sentences = config.task_specific_params[\"max_sentences\"]\n self.gcn_layer = config.task_specific_params[\"gcn_layer\"]\n self.attention_maxscore = config.task_specific_params[\"attention_maxscore\"]\n self.relation_num = config.task_specific_params[\"relation_num\"]\n\n self.roberta = RobertaModel(config)\n self.graph_aggregation = GCNGraphAgg(\n config.hidden_size,\n self.max_sentences,\n self.gcn_layer,\n self.max_sen_replen,\n self.attention_maxscore,\n self.relation_num,\n )\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n nodes_index_mask=None,\n adj_metric=None,\n node_mask=None,\n sen2node=None,\n sentence_mask=None,\n sentence_length=None,\n batch_id=None,\n ):\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n )\n sequence_output = outputs[0][:, 0, :] \n hidden_states = outputs[2][0] \n\n graph_rep = self.graph_aggregation(\n hidden_states,\n nodes_index_mask,\n adj_metric,\n node_mask,\n sen2node,\n sentence_mask,\n sentence_length,\n )\n\n whole_rep = torch.cat([sequence_output, graph_rep], dim=-1) \n\n return outputs[2:], whole_rep" }, { "identifier": "RobertaForGraphBasedSequenceClassification_RFCL", "path": "modeling_roberta.py", "snippet": "class RobertaForGraphBasedSequenceClassification_RFCL(BertPreTrainedModel):\n def __init__(self, config):\n config.output_hidden_states = True\n config.output_attentions = True\n\n super(RobertaForGraphBasedSequenceClassification_RFCL, self).__init__(config)\n self.temperature = 0.2\n self.num_labels = config.num_labels\n self.gcn_layer = config.task_specific_params[\"gcn_layer\"]\n self.max_node_num = config.task_specific_params[\"max_nodes_num\"]\n self.max_sentences = config.task_specific_params[\"max_sentences\"]\n self.max_sen_replen = config.task_specific_params[\"max_sen_replen\"]\n self.attention_maxscore = config.task_specific_params[\"attention_maxscore\"]\n self.relation_num = config.task_specific_params[\"relation_num\"]\n\n self.roberta = RobertaModel(config)\n self.classifier = RobertaClassificationHead(\n config, graph_node_size=self.max_sen_replen\n )\n self.graph_aggregation = GCNGraphAgg(\n config.hidden_size,\n self.max_sentences,\n self.gcn_layer,\n self.max_sen_replen,\n self.attention_maxscore,\n self.relation_num,\n )\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n nodes_index_mask=None,\n adj_metric=None,\n node_mask=None,\n sen2node=None,\n sentence_mask=None,\n sentence_length=None,\n batch_id=None,\n ):\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n )\n sequence_output = outputs[0][:, 0, :]\n hidden_states = outputs[2][0]\n\n graph_rep = self.graph_aggregation(\n hidden_states,\n nodes_index_mask,\n adj_metric,\n node_mask,\n sen2node,\n sentence_mask,\n sentence_length,\n )\n whole_rep = torch.cat([sequence_output, graph_rep], dim=-1)\n\n logits = self.classifier(torch.cat([sequence_output, graph_rep], 
dim=-1))\n\n outputs = (logits,) + outputs[2:]\n\n if labels is not None:\n if self.num_labels == 1:\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n batch_size = len(labels)\n batch_idx_by_label = {}\n for i in range(2):\n batch_idx_by_label[i] = [\n idx\n for idx in range(batch_size)\n if int(labels.view(-1)[idx]) == i\n ] \n\n contraloss = self.contrastive_loss_es(\n batch_size, batch_idx_by_label, whole_rep\n )\n\n loss_fct = CrossEntropyLoss()\n ce_loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n contraloss_weight = 0.6\n loss = (\n 1.0 - contraloss_weight\n ) * ce_loss + contraloss_weight * contraloss\n\n outputs = (loss,) + outputs\n\n return outputs, whole_rep \n\n def get_key(self, dic, value):\n return [k for k, v in dic.items() if value in v]\n\n def contrastive_loss_es(self, batch_size, batch_idx_by_label, hidden_feats):\n hidden_feats = F.normalize(hidden_feats, dim=1)\n loss = 0\n sim_matrix = torch.mm(hidden_feats, hidden_feats.T) \n loss = 0.0\n\n for i in range(batch_size):\n label_list = self.get_key(batch_idx_by_label, i)\n label = label_list[0]\n one_same_label = (\n torch.zeros((batch_size,))\n .to(sim_matrix.device)\n .scatter_(\n 0,\n torch.tensor(batch_idx_by_label[label]).to(sim_matrix.device),\n 1.0,\n )\n )\n one_diff_label = (\n torch.ones((batch_size,))\n .to(sim_matrix.device)\n .scatter_(\n 0,\n torch.tensor(batch_idx_by_label[label]).to(sim_matrix.device),\n 0.0,\n )\n )\n one_for_not_i = (\n torch.ones((batch_size,))\n .to(sim_matrix.device)\n .scatter_(0, torch.tensor([i]).to(sim_matrix.device), 0.0)\n ) \n one_for_numerator = one_same_label.mul(one_for_not_i)\n one_for_neg = one_diff_label.mul(one_for_not_i)\n\n numerator = torch.sum(\n one_for_numerator * torch.exp(sim_matrix[i, :] / self.temperature)\n )\n denominator = torch.sum(\n one_for_not_i * torch.exp(sim_matrix[i, :] / self.temperature)\n )\n\n if numerator == 0:\n numerator += 1e-6\n if denominator == 0:\n denominator += 1e-6\n\n loss += -torch.log(numerator / denominator)\n\n return loss / batch_size" } ]
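The classification heads in the context above combine a cross-entropy term with an in-batch, label-wise contrastive term (contrastive_loss_labelwise_winslide, temperature 0.2, weight 0.6). A self-contained re-implementation sketch of that term, written independently of the repository and with placeholder feature dimensions:

import torch
import torch.nn.functional as F

def labelwise_contrastive_loss(reps: torch.Tensor, labels: torch.Tensor,
                               temperature: float = 0.2) -> torch.Tensor:
    reps = F.normalize(reps, dim=1)                  # cosine similarities, as in the snippet
    exp_sim = torch.exp(reps @ reps.T / temperature)
    n = reps.size(0)
    not_self = 1.0 - torch.eye(n, device=reps.device)
    same_label = (labels.view(-1, 1) == labels.view(1, -1)).float() * not_self

    numerator = (same_label * exp_sim).sum(dim=1).clamp_min(1e-6)    # positives: same label, not self
    denominator = (not_self * exp_sim).sum(dim=1).clamp_min(1e-6)    # everything except self
    return (-torch.log(numerator / denominator)).mean()

reps = torch.randn(8, 768 + 128)      # placeholder dims for the [CLS] + graph representation
labels = torch.randint(0, 2, (8,))
ce_loss = torch.tensor(0.7)           # placeholder cross-entropy value
loss = 0.4 * ce_loss + 0.6 * labelwise_contrastive_loss(reps, labels)  # contraloss_weight = 0.6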
import os import torch import argparse import logging import random import wandb import numpy as np import ray from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from torch.optim import AdamW from transformers import ( set_seed, AutoTokenizer, AutoConfig, AutoModel, AutoModelForSequenceClassification, get_linear_schedule_with_warmup, ) from functools import partial from util import glue_compute_metrics as compute_metrics from util import ( glue_convert_examples_to_features as convert_examples_to_features, ) from util import glue_output_modes as output_modes from util import glue_processors as processors from modeling_roberta import ( RobertaForGraphBasedSequenceClassification, RobertaForGraphBasedSequenceClassification_CL, RobertaForGraphBasedSequenceClassification_MBCL, EncoderForMBCL, RobertaForGraphBasedSequenceClassification_RFCL, ) from ray import tune from ray.tune import CLIReporter from ray.tune.schedulers import ASHAScheduler from apex import amp
11110
for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): eval_dataset = load_and_cache_examples( args, eval_task, tokenizer, evaluate=True, mode=mode ) if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly. eval_sampler = SequentialSampler(eval_dataset) eval_dataloader = DataLoader( eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size ) if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel): model = torch.nn.DataParallel(model) # Evaluation logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(eval_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 preds, out_label_ids = None, None for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = { "input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3], "nodes_index_mask": batch[4], "adj_metric": batch[5], "node_mask": batch[6], "sen2node": batch[7], "sentence_mask": batch[8], "sentence_length": batch[9], } if args.model_type != "distilbert": inputs["token_type_ids"] = ( batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids outputs, _ = model(**inputs) tmp_eval_loss, logits = outputs[:2] eval_loss += tmp_eval_loss.mean().item() nb_eval_steps += 1 if preds is None: preds = logits.detach().cpu().numpy() out_label_ids = inputs["labels"].detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) out_label_ids = np.append( out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0 ) probs = preds eval_loss = eval_loss / nb_eval_steps if args.output_mode == "classification": preds = np.argmax(preds, axis=1) elif args.output_mode == "regression": preds = np.squeeze(preds) result = compute_metrics(eval_task, preds, out_label_ids) results.update(result) output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results {} *****".format(prefix)) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) wandb.log( { "eval/acc": result["acc"], "eval/f1": result["f1"], "eval/acc_and_f1": result["acc_and_f1"], } ) return results def load_and_cache_examples( args, task, tokenizer, evaluate=False, mode="train", dataset_name="", rel="" ): if args.local_rank not in [-1, 0] and not evaluate: torch.distributed.barrier() processor = processors[task]() output_mode = output_modes[task] # Load data features from cache or dataset file cached_features_file = os.path.join( args.data_dir, "cached_{}_{}_{}_{}_{}_{}".format( mode, list(filter(None, args.model_name_or_path.split("/"))).pop(), str(args.max_seq_length), str(task), str(dataset_name), str(rel), ), ) if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", args.data_dir) label_list = processor.get_labels() if mode == "train": examples = processor.get_train_examples(args.with_relation, args.data_dir) elif mode == "dev": examples = 
processor.get_dev_examples(args.with_relation, args.data_dir) elif mode == "test": examples = processor.get_test_examples(args.with_relation, args.data_dir)
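The evaluation loop in the cropped_code above reduces the accumulated logits to class predictions and hands them to compute_metrics, which for the deepfake task routes to acc_and_f1 (see the util.py snippet in the context). A small sketch with made-up logits and labels:

import numpy as np

from util import glue_compute_metrics  # util.py is the path given in the context snippet

logits = np.array([[0.2, 0.8], [0.9, 0.1], [0.4, 0.6], [0.7, 0.3]])  # made-up eval outputs
labels = np.array([1, 0, 1, 1])

preds = np.argmax(logits, axis=1)  # output_mode == "classification"
result = glue_compute_metrics("deepfake", preds, labels)
print(result)  # keys acc, f1 and acc_and_f1 are what evaluate() writes to eval_results.txt and wandb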
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Based on code from the above authors, modifications made by Xi'an Jiaotong University. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. logger = logging.getLogger(__name__) def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def number_h(num): for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]: if abs(num) < 1000.0: return "%3.1f%s" % (num, unit) num /= 1000.0 return "%.1f%s" % (num, "Yi") def generate_shaped_nodes_mask(nodes, max_seq_length, max_nodes_num): nodes_mask = np.zeros(shape=(max_nodes_num, max_seq_length)) nodes_num = min(len(nodes), max_nodes_num) for i in range(nodes_num): span = nodes[i] if span[0] != -1: if span[0] < max_seq_length - 1: end_pos = ( span[1] if span[1] < max_seq_length - 1 else max_seq_length - 1 ) nodes_mask[i, span[0] + 1 : end_pos + 1] = 1 else: continue return nodes_mask, nodes_num def generate_shaped_edge_mask(adj_metric, nodes_num, max_nodes_num, relation_n): if nodes_num != 0: if relation_n != 0: new_adj_metric = np.zeros(shape=(relation_n, max_nodes_num, max_nodes_num)) for i in range(relation_n): new_adj_metric[i][:nodes_num, :nodes_num] = adj_metric[i][ :nodes_num, :nodes_num ] else: new_adj_metric = np.zeros(shape=(max_nodes_num, max_nodes_num)) new_adj_metric[:nodes_num, :nodes_num] = adj_metric[:nodes_num, :nodes_num] return new_adj_metric def train(args, train_dataset, model, tokenizer): """Train the model""" total_params = sum(p.numel() for p in model.parameters()) total_trainable_params = sum( p.numel() for p in model.parameters() if p.requires_grad ) print("Total Params:", number_h(total_params)) print("Total Trainable Params:", number_h(total_trainable_params)) args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = ( RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) ) train_dataloader = DataLoader( train_dataset, sampler=train_sampler, batch_size=args.train_batch_size ) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = ( args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 ) else: t_total = ( len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs ) # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay) ], "weight_decay": args.weight_decay, }, { "params": [ p for n, p in model.named_parameters() if any(nd in n for nd in no_decay) ], "weight_decay": 0.01, }, ] optimizer = AdamW( optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon ) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) # 
Check if saved optimizer or scheduler states exist if os.path.isfile( os.path.join(args.model_name_or_path, "optimizer.pt") ) and os.path.isfile(os.path.join(args.model_name_or_path, "scheduler.pt")): optimizer.load_state_dict( torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")) ) scheduler.load_state_dict( torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")) ) if args.fp16: try: except ImportError: raise ImportError( "Please install apex from https://www.github.com/nvidia/apex to use fp16 training." ) model, optimizer = amp.initialize( model, optimizer, opt_level=args.fp16_opt_level ) # Multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True, ) # Training logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info( " Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size ) logger.info( " Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) best_acc, best_f1 = 0.0, 0.0 global_step, epochs_trained, steps_trained_in_current_epoch = 0, 0, 0 # Check if continuing training from a checkpoint if os.path.exists(args.model_name_or_path): # set global_step to gobal_step of last saved checkpoint from model path global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0]) epochs_trained = global_step // ( len(train_dataloader) // args.gradient_accumulation_steps ) steps_trained_in_current_epoch = global_step % ( len(train_dataloader) // args.gradient_accumulation_steps ) logger.info( " Continuing training from checkpoint, will skip to saved global_step" ) logger.info(" Continuing training from epoch %d", epochs_trained) logger.info(" Continuing training from global step %d", global_step) logger.info( " Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch, ) tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange( epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0], ) set_seed(args) max_acc, max_acc_f1, max_f1, max_f1_acc = 0.0, 0.0, 0.0, 0.0 for idx, _ in enumerate(train_iterator): tr_loss = 0.0 epoch_iterator = tqdm( train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0] ) for step, batch in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 continue model.train() batch = tuple(t.to(args.device) for t in batch) inputs = { "input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3], "nodes_index_mask": batch[4], "adj_metric": batch[5], "node_mask": batch[6], "sen2node": batch[7], "sentence_mask": batch[8], "sentence_length": batch[9], "batch_id": batch[10], } if args.model_type != "distilbert": inputs["token_type_ids"] = ( batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None ) outputs, _ = model(**inputs) loss = outputs[0] 
wandb.log({"train/loss": loss}) if args.n_gpu > 1: loss = loss.mean() if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() epoch_iterator.set_description( "loss {}".format( round(tr_loss * args.gradient_accumulation_steps / (step + 1), 4) ) ) if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_( amp.master_params(optimizer), args.max_grad_norm ) else: torch.nn.utils.clip_grad_norm_( model.parameters(), args.max_grad_norm ) optimizer.step() scheduler.step() model.zero_grad() global_step += 1 if ( args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0 ): logs = {} if ( args.local_rank == -1 and args.evaluate_during_training ): results = evaluate(args, model, tokenizer) for key, value in results.items(): eval_key = "eval_{}".format(key) logs[eval_key] = value loss_scalar = (tr_loss - logging_loss) / args.logging_steps learning_rate_scalar = scheduler.get_lr()[0] logs["learning_rate"] = learning_rate_scalar logs["loss"] = loss_scalar logging_loss = tr_loss wandb.log({"eval/loss": loss_scalar}) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.local_rank in [-1, 0] and args.save_steps > 0 and args.do_eval: results = evaluate(args, model, tokenizer, checkpoint=str(idx)) logger.info("the results is {}".format(results)) if results["acc"] > max_acc: max_acc = results["acc"] max_acc_f1 = results["f1"] if results["f1"] > max_f1: max_f1 = results["f1"] max_f1_acc = results["acc"] if results["f1"] > best_f1: best_f1 = results["f1"] output_dir = os.path.join( args.output_dir, "seed-{}".format(args.seed), "checkpoint-{}-{}".format(idx, best_f1), ) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save( args, os.path.join(output_dir, "training_{}.bin".format(idx)) ) logger.info("Saving model checkpoint to %s", output_dir) torch.save( optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt") ) torch.save( scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt") ) logger.info("Saving optimizer and scheduler states to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break return_res = { "max_acc": max_acc, "max_acc_f1": max_acc_f1, "max_f1": max_f1, "max_f1_acc": max_f1_acc, } if args.do_ray: tune.report( accuracy=max_acc, max_acc_f1=max_acc_f1, f1=max_f1, max_f1_acc=max_f1_acc ) return global_step, tr_loss / global_step, return_res, output_dir def mb_train(args, train_dataset, encoder_q, encoder_k, dataloader, tokenizer): """Train the model""" global memory_queue encoder_q.train() total_params = sum(p.numel() for p in encoder_q.parameters()) total_trainable_params = sum( p.numel() for p in encoder_q.parameters() if p.requires_grad ) print("Encoder Params:", number_h(total_params)) print("Encoder Trainable Params:", number_h(total_trainable_params)) args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = ( RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) ) train_dataloader = DataLoader( train_dataset, sampler=train_sampler, batch_size=args.train_batch_size ) if args.max_steps > 0: t_total = 
args.max_steps args.num_train_epochs = ( args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 ) else: t_total = ( len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs ) # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [ p for n, p in encoder_q.named_parameters() if not any(nd in n for nd in no_decay) ], "weight_decay": args.weight_decay, }, { "params": [ p for n, p in encoder_q.named_parameters() if any(nd in n for nd in no_decay) ], "weight_decay": 0.01, }, ] optimizer = AdamW( optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon ) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) # Training logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info( " Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size ) logger.info( " Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) best_f1 = 0.0 global_step, epochs_trained, steps_trained_in_current_epoch = 0, 0, 0 tr_loss, logging_loss = 0.0, 0.0 encoder_q.zero_grad() train_iterator = trange( epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0], ) set_seed(args) max_acc, max_acc_f1, max_f1, max_f1_acc = 0.0, 0.0, 0.0, 0.0 for idx, _ in enumerate(train_iterator): tr_loss = 0.0 epoch_iterator = tqdm( train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0] ) for step, batch in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 continue encoder_q.train() batch = tuple(t.to(args.device) for t in batch) inputs = { "input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3], "nodes_index_mask": batch[4], "adj_metric": batch[5], "node_mask": batch[6], "sen2node": batch[7], "sentence_mask": batch[8], "sentence_length": batch[9], "batch_id": batch[10], } if args.model_type != "distilbert": inputs["token_type_ids"] = ( batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids q_outputs, q_rep = encoder_q(**inputs) # Model outputs are always tuple in transformers (see doc). 
if args.n_gpu > 1: loss = loss.mean() if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps loss.backward() tr_loss += loss.item() epoch_iterator.set_description( "loss {}".format( round(tr_loss * args.gradient_accumulation_steps / (step + 1), 4) ) ) if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_( amp.master_params(optimizer), args.max_grad_norm ) else: torch.nn.utils.clip_grad_norm_( encoder_q.parameters(), args.max_grad_norm ) optimizer.step() scheduler.step() encoder_q.zero_grad() global_step += 1 if ( args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0 ): logs = {} if ( args.local_rank == -1 and args.evaluate_during_training ): # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, encoder_q, tokenizer) for key, value in results.items(): eval_key = "eval_{}".format(key) logs[eval_key] = value loss_scalar = (tr_loss - logging_loss) / args.logging_steps learning_rate_scalar = scheduler.get_lr()[0] logs["learning_rate"] = learning_rate_scalar logs["loss"] = loss_scalar logging_loss = tr_loss wandb.log({"train/loss": loss_scalar}) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.local_rank in [-1, 0] and args.save_steps > 0 and args.do_eval: results = evaluate(args, encoder_q, tokenizer, checkpoint=str(idx)) logger.info("the results is {}".format(results)) if results["f1"] > max_f1: max_f1 = results["f1"] max_f1_acc = results["acc"] if results["acc"] > max_acc: max_acc = results["acc"] max_acc_f1 = results["f1"] if results["f1"] > best_f1: best_f1 = results["f1"] output_dir = os.path.join( args.output_dir, "seed-{}".format(args.seed), "checkpoint-{}-{}".format(idx, best_f1), ) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = ( encoder_q.module if hasattr(encoder_q, "module") else encoder_q ) # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save( args, os.path.join(output_dir, "training_{}.bin".format(idx)) ) logger.info("Saving model checkpoint to %s", output_dir) torch.save( optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt") ) torch.save( scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt") ) logger.info("Saving optimizer and scheduler states to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break return_res = { "max_acc": max_acc, "max_acc_f1": max_acc_f1, "max_f1": max_f1, "max_f1_acc": max_f1_acc, } if args.do_ray: tune.report( accuracy=max_acc, max_acc_f1=max_acc_f1, f1=max_f1, max_f1_acc=max_f1_acc ) return global_step, tr_loss / global_step, return_res, output_dir def evaluate(args, model, tokenizer, checkpoint=None, prefix="", mode="dev"): eval_task_names = (args.task_name,) eval_outputs_dirs = (args.output_dir,) results = {} for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): eval_dataset = load_and_cache_examples( args, eval_task, tokenizer, evaluate=True, mode=mode ) if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly. 
eval_sampler = SequentialSampler(eval_dataset) eval_dataloader = DataLoader( eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size ) if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel): model = torch.nn.DataParallel(model) # Evaluation logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(eval_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 preds, out_label_ids = None, None for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = { "input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3], "nodes_index_mask": batch[4], "adj_metric": batch[5], "node_mask": batch[6], "sen2node": batch[7], "sentence_mask": batch[8], "sentence_length": batch[9], } if args.model_type != "distilbert": inputs["token_type_ids"] = ( batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids outputs, _ = model(**inputs) tmp_eval_loss, logits = outputs[:2] eval_loss += tmp_eval_loss.mean().item() nb_eval_steps += 1 if preds is None: preds = logits.detach().cpu().numpy() out_label_ids = inputs["labels"].detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) out_label_ids = np.append( out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0 ) probs = preds eval_loss = eval_loss / nb_eval_steps if args.output_mode == "classification": preds = np.argmax(preds, axis=1) elif args.output_mode == "regression": preds = np.squeeze(preds) result = compute_metrics(eval_task, preds, out_label_ids) results.update(result) output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results {} *****".format(prefix)) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) wandb.log( { "eval/acc": result["acc"], "eval/f1": result["f1"], "eval/acc_and_f1": result["acc_and_f1"], } ) return results def load_and_cache_examples( args, task, tokenizer, evaluate=False, mode="train", dataset_name="", rel="" ): if args.local_rank not in [-1, 0] and not evaluate: torch.distributed.barrier() processor = processors[task]() output_mode = output_modes[task] # Load data features from cache or dataset file cached_features_file = os.path.join( args.data_dir, "cached_{}_{}_{}_{}_{}_{}".format( mode, list(filter(None, args.model_name_or_path.split("/"))).pop(), str(args.max_seq_length), str(task), str(dataset_name), str(rel), ), ) if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", args.data_dir) label_list = processor.get_labels() if mode == "train": examples = processor.get_train_examples(args.with_relation, args.data_dir) elif mode == "dev": examples = processor.get_dev_examples(args.with_relation, args.data_dir) elif mode == "test": examples = processor.get_test_examples(args.with_relation, args.data_dir)
features = convert_examples_to_features(
0
2023-10-24 14:03:11+00:00
16k
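The resume bookkeeping in train() above parses the global step out of a "checkpoint-<step>" directory name and converts it into whole epochs already trained plus optimizer updates to skip inside the current epoch. Below is a minimal, dependency-free sketch of that arithmetic; the directory name, batch count, and accumulation factor are illustrative values, not taken from the record above.

def resume_position(model_name_or_path: str, num_batches: int, grad_accum_steps: int):
    # "output/checkpoint-500" -> global_step 500 (same parsing as in train() above)
    global_step = int(model_name_or_path.split("-")[-1].split("/")[0])
    # optimizer updates per epoch, as used for both t_total and the resume split
    updates_per_epoch = num_batches // grad_accum_steps
    epochs_trained = global_step // updates_per_epoch
    steps_trained_in_current_epoch = global_step % updates_per_epoch
    return global_step, epochs_trained, steps_trained_in_current_epoch

if __name__ == "__main__":
    # hypothetical numbers: 120 batches per epoch, gradients accumulated over 4 batches
    print(resume_position("output/checkpoint-500", num_batches=120, grad_accum_steps=4))
    # -> (500, 16, 20): 16 full epochs done, 20 optimizer updates into the next epoch

The same updates-per-epoch quantity (len(train_dataloader) // args.gradient_accumulation_steps) is what train() uses when deriving t_total, so the resume position stays aligned with the optimizer-step schedule.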
deforum-studio/deforum
src/deforum/models/depth_models/zoedepth/models/zoedepth_nk/zoedepth_nk_v1.py
[ { "identifier": "DepthModel", "path": "src/deforum/models/depth_models/zoedepth/models/depth_model.py", "snippet": "class DepthModel(nn.Module):\n def __init__(self):\n super().__init__()\n self.device = 'cuda'\n \n def to(self, device) -> nn.Module:\n self.device = device\n return super().to(device)\n \n def forward(self, x, *args, **kwargs):\n raise NotImplementedError\n \n def _infer(self, x: torch.Tensor):\n \"\"\"\n Inference interface for the model\n Args:\n x (torch.Tensor): input tensor of shape (b, c, h, w)\n Returns:\n torch.Tensor: output tensor of shape (b, 1, h, w)\n \"\"\"\n return self(x)['metric_depth']\n \n def _infer_with_pad_aug(self, x: torch.Tensor, pad_input: bool=True, fh: float=3, fw: float=3, upsampling_mode: str='bicubic', padding_mode=\"reflect\", **kwargs) -> torch.Tensor:\n \"\"\"\n Inference interface for the model with padding augmentation\n Padding augmentation fixes the boundary artifacts in the output depth map.\n Boundary artifacts are sometimes caused by the fact that the model is trained on NYU raw dataset which has a black or white border around the image.\n This augmentation pads the input image and crops the prediction back to the original size / view.\n\n Note: This augmentation is not required for the models trained with 'avoid_boundary'=True.\n Args:\n x (torch.Tensor): input tensor of shape (b, c, h, w)\n pad_input (bool, optional): whether to pad the input or not. Defaults to True.\n fh (float, optional): height padding factor. The padding is calculated as sqrt(h/2) * fh. Defaults to 3.\n fw (float, optional): width padding factor. The padding is calculated as sqrt(w/2) * fw. Defaults to 3.\n upsampling_mode (str, optional): upsampling mode. Defaults to 'bicubic'.\n padding_mode (str, optional): padding mode. Defaults to \"reflect\".\n Returns:\n torch.Tensor: output tensor of shape (b, 1, h, w)\n \"\"\"\n # assert x is nchw and c = 3\n assert x.dim() == 4, \"x must be 4 dimensional, got {}\".format(x.dim())\n assert x.shape[1] == 3, \"x must have 3 channels, got {}\".format(x.shape[1])\n\n if pad_input:\n assert fh > 0 or fw > 0, \"atlease one of fh and fw must be greater than 0\"\n pad_h = int(np.sqrt(x.shape[2]/2) * fh)\n pad_w = int(np.sqrt(x.shape[3]/2) * fw)\n padding = [pad_w, pad_w]\n if pad_h > 0:\n padding += [pad_h, pad_h]\n \n x = F.pad(x, padding, mode=padding_mode, **kwargs)\n out = self._infer(x)\n if out.shape[-2:] != x.shape[-2:]:\n out = F.interpolate(out, size=(x.shape[2], x.shape[3]), mode=upsampling_mode, align_corners=False)\n if pad_input:\n # crop to the original size, handling the case where pad_h and pad_w is 0\n if pad_h > 0:\n out = out[:, :, pad_h:-pad_h,:]\n if pad_w > 0:\n out = out[:, :, :, pad_w:-pad_w]\n return out\n \n def infer_with_flip_aug(self, x, pad_input: bool=True, **kwargs) -> torch.Tensor:\n \"\"\"\n Inference interface for the model with horizontal flip augmentation\n Horizontal flip augmentation improves the accuracy of the model by averaging the output of the model with and without horizontal flip.\n Args:\n x (torch.Tensor): input tensor of shape (b, c, h, w)\n pad_input (bool, optional): whether to use padding augmentation. 
Defaults to True.\n Returns:\n torch.Tensor: output tensor of shape (b, 1, h, w)\n \"\"\"\n # infer with horizontal flip and average\n out = self._infer_with_pad_aug(x, pad_input=pad_input, **kwargs)\n out_flip = self._infer_with_pad_aug(torch.flip(x, dims=[3]), pad_input=pad_input, **kwargs)\n out = (out + torch.flip(out_flip, dims=[3])) / 2\n return out\n \n def infer(self, x, pad_input: bool=True, with_flip_aug: bool=True, **kwargs) -> torch.Tensor:\n \"\"\"\n Inference interface for the model\n Args:\n x (torch.Tensor): input tensor of shape (b, c, h, w)\n pad_input (bool, optional): whether to use padding augmentation. Defaults to True.\n with_flip_aug (bool, optional): whether to use horizontal flip augmentation. Defaults to True.\n Returns:\n torch.Tensor: output tensor of shape (b, 1, h, w)\n \"\"\"\n if with_flip_aug:\n return self.infer_with_flip_aug(x, pad_input=pad_input, **kwargs)\n else:\n return self._infer_with_pad_aug(x, pad_input=pad_input, **kwargs)\n \n @torch.no_grad()\n def infer_pil(self, pil_img, pad_input: bool=True, with_flip_aug: bool=True, output_type: str=\"numpy\", **kwargs) -> Union[np.ndarray, PIL.Image.Image, torch.Tensor]:\n \"\"\"\n Inference interface for the model for PIL image\n Args:\n pil_img (PIL.Image.Image): input PIL image\n pad_input (bool, optional): whether to use padding augmentation. Defaults to True.\n with_flip_aug (bool, optional): whether to use horizontal flip augmentation. Defaults to True.\n output_type (str, optional): output type. Supported values are 'numpy', 'pil' and 'tensor'. Defaults to \"numpy\".\n \"\"\"\n x = transforms.ToTensor()(pil_img).unsqueeze(0).to(self.device)\n out_tensor = self.infer(x, pad_input=pad_input, with_flip_aug=with_flip_aug, **kwargs)\n if output_type == \"numpy\":\n return out_tensor.squeeze().cpu().numpy()\n elif output_type == \"pil\":\n # uint16 is required for depth pil image\n out_16bit_numpy = (out_tensor.squeeze().cpu().numpy()*256).astype(np.uint16)\n return Image.fromarray(out_16bit_numpy)\n elif output_type == \"tensor\":\n return out_tensor.squeeze().cpu()\n else:\n raise ValueError(f\"output_type {output_type} not supported. Supported values are 'numpy', 'pil' and 'tensor'\")" }, { "identifier": "MidasCore", "path": "src/deforum/models/depth_models/zoedepth/models/base_models/midas.py", "snippet": "class MidasCore(nn.Module):\n def __init__(self, midas, trainable=False, fetch_features=True, layer_names=('out_conv', 'l4_rn', 'r4', 'r3', 'r2', 'r1'), freeze_bn=False, keep_aspect_ratio=True,\n img_size=384, **kwargs):\n \"\"\"Midas Base model used for multi-scale feature extraction.\n\n Args:\n midas (torch.nn.Module): Midas model.\n trainable (bool, optional): Train midas model. Defaults to False.\n fetch_features (bool, optional): Extract multi-scale features. Defaults to True.\n layer_names (tuple, optional): Layers used for feature extraction. Order = (head output features, last layer features, ...decoder features). Defaults to ('out_conv', 'l4_rn', 'r4', 'r3', 'r2', 'r1').\n freeze_bn (bool, optional): Freeze BatchNorm. Generally results in better finetuning performance. Defaults to False.\n keep_aspect_ratio (bool, optional): Keep the aspect ratio of input images while resizing. Defaults to True.\n img_size (int, tuple, optional): Input resolution. 
Defaults to 384.\n \"\"\"\n super().__init__()\n self.core = midas\n self.output_channels = None\n self.core_out = {}\n self.trainable = trainable\n self.fetch_features = fetch_features\n # midas.scratch.output_conv = nn.Identity()\n self.handles = []\n # self.layer_names = ['out_conv','l4_rn', 'r4', 'r3', 'r2', 'r1']\n self.layer_names = layer_names\n\n self.set_trainable(trainable)\n self.set_fetch_features(fetch_features)\n\n self.prep = PrepForMidas(keep_aspect_ratio=keep_aspect_ratio,\n img_size=img_size, do_resize=kwargs.get('do_resize', True))\n\n if freeze_bn:\n self.freeze_bn()\n\n def set_trainable(self, trainable):\n self.trainable = trainable\n if trainable:\n self.unfreeze()\n else:\n self.freeze()\n return self\n\n def set_fetch_features(self, fetch_features):\n self.fetch_features = fetch_features\n if fetch_features:\n if len(self.handles) == 0:\n self.attach_hooks(self.core)\n else:\n self.remove_hooks()\n return self\n\n def freeze(self):\n for p in self.parameters():\n p.requires_grad = False\n self.trainable = False\n return self\n\n def unfreeze(self):\n for p in self.parameters():\n p.requires_grad = True\n self.trainable = True\n return self\n\n def freeze_bn(self):\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n return self\n\n def forward(self, x, denorm=False, return_rel_depth=False):\n with torch.no_grad():\n if denorm:\n x = denormalize(x)\n x = self.prep(x)\n # print(\"Shape after prep: \", x.shape)\n\n with torch.set_grad_enabled(self.trainable):\n\n # print(\"Input size to Midascore\", x.shape)\n rel_depth = self.core(x)\n # print(\"Output from midas shape\", rel_depth.shape)\n if not self.fetch_features:\n return rel_depth\n out = [self.core_out[k] for k in self.layer_names]\n\n if return_rel_depth:\n return rel_depth, out\n return out\n\n def get_rel_pos_params(self):\n for name, p in self.core.pretrained.named_parameters():\n if \"relative_position\" in name:\n yield p\n\n def get_enc_params_except_rel_pos(self):\n for name, p in self.core.pretrained.named_parameters():\n if \"relative_position\" not in name:\n yield p\n\n def freeze_encoder(self, freeze_rel_pos=False):\n if freeze_rel_pos:\n for p in self.core.pretrained.parameters():\n p.requires_grad = False\n else:\n for p in self.get_enc_params_except_rel_pos():\n p.requires_grad = False\n return self\n\n def attach_hooks(self, midas):\n if len(self.handles) > 0:\n self.remove_hooks()\n if \"out_conv\" in self.layer_names:\n self.handles.append(list(midas.scratch.output_conv.children())[\n 3].register_forward_hook(get_activation(\"out_conv\", self.core_out)))\n if \"r4\" in self.layer_names:\n self.handles.append(midas.scratch.refinenet4.register_forward_hook(\n get_activation(\"r4\", self.core_out)))\n if \"r3\" in self.layer_names:\n self.handles.append(midas.scratch.refinenet3.register_forward_hook(\n get_activation(\"r3\", self.core_out)))\n if \"r2\" in self.layer_names:\n self.handles.append(midas.scratch.refinenet2.register_forward_hook(\n get_activation(\"r2\", self.core_out)))\n if \"r1\" in self.layer_names:\n self.handles.append(midas.scratch.refinenet1.register_forward_hook(\n get_activation(\"r1\", self.core_out)))\n if \"l4_rn\" in self.layer_names:\n self.handles.append(midas.scratch.layer4_rn.register_forward_hook(\n get_activation(\"l4_rn\", self.core_out)))\n\n return self\n\n def remove_hooks(self):\n for h in self.handles:\n h.remove()\n return self\n\n def __del__(self):\n self.remove_hooks()\n\n def set_output_channels(self, model_type):\n 
self.output_channels = MIDAS_SETTINGS[model_type]\n\n @staticmethod\n def build(midas_model_type=\"DPT_BEiT_L_384\", train_midas=False, use_pretrained_midas=True, fetch_features=False, freeze_bn=True, force_keep_ar=False, force_reload=False, **kwargs):\n if midas_model_type not in MIDAS_SETTINGS:\n raise ValueError(\n f\"Invalid model type: {midas_model_type}. Must be one of {list(MIDAS_SETTINGS.keys())}\")\n if \"img_size\" in kwargs:\n kwargs = MidasCore.parse_img_size(kwargs)\n img_size = kwargs.pop(\"img_size\", [384, 384])\n print(\"img_size\", img_size)\n midas = torch.hub.load(\"intel-isl/MiDaS\", midas_model_type,\n pretrained=use_pretrained_midas, force_reload=force_reload)\n kwargs.update({'keep_aspect_ratio': force_keep_ar})\n midas_core = MidasCore(midas, trainable=train_midas, fetch_features=fetch_features,\n freeze_bn=freeze_bn, img_size=img_size, **kwargs)\n midas_core.set_output_channels(midas_model_type)\n return midas_core\n\n @staticmethod\n def build_from_config(config):\n return MidasCore.build(**config)\n\n @staticmethod\n def parse_img_size(config):\n assert 'img_size' in config\n if isinstance(config['img_size'], str):\n assert \",\" in config['img_size'], \"img_size should be a string with comma separated img_size=H,W\"\n config['img_size'] = list(map(int, config['img_size'].split(\",\")))\n assert len(\n config['img_size']) == 2, \"img_size should be a string with comma separated img_size=H,W\"\n elif isinstance(config['img_size'], int):\n config['img_size'] = [config['img_size'], config['img_size']]\n else:\n assert isinstance(config['img_size'], list) and len(\n config['img_size']) == 2, \"img_size should be a list of H,W\"\n return config" }, { "identifier": "AttractorLayer", "path": "src/deforum/models/depth_models/zoedepth/models/layers/attractor.py", "snippet": "class AttractorLayer(nn.Module):\n def __init__(self, in_features, n_bins, n_attractors=16, mlp_dim=128, min_depth=1e-3, max_depth=10,\n alpha=300, gamma=2, kind='sum', attractor_type='exp', memory_efficient=False):\n \"\"\"\n Attractor layer for bin centers. Bin centers are bounded on the interval (min_depth, max_depth)\n \"\"\"\n super().__init__()\n\n self.n_attractors = n_attractors\n self.n_bins = n_bins\n self.min_depth = min_depth\n self.max_depth = max_depth\n self.alpha = alpha\n self.gamma = gamma\n self.kind = kind\n self.attractor_type = attractor_type\n self.memory_efficient = memory_efficient\n\n self._net = nn.Sequential(\n nn.Conv2d(in_features, mlp_dim, 1, 1, 0),\n nn.ReLU(inplace=True),\n nn.Conv2d(mlp_dim, n_attractors*2, 1, 1, 0), # x2 for linear norm\n nn.ReLU(inplace=True)\n )\n\n def forward(self, x, b_prev, prev_b_embedding=None, interpolate=True, is_for_query=False):\n \"\"\"\n Args:\n x (torch.Tensor) : feature block; shape - n, c, h, w\n b_prev (torch.Tensor) : previous bin centers normed; shape - n, prev_nbins, h, w\n \n Returns:\n tuple(torch.Tensor,torch.Tensor) : new bin centers normed and scaled; shape - n, nbins, h, w\n \"\"\"\n if prev_b_embedding is not None:\n if interpolate:\n prev_b_embedding = nn.functional.interpolate(\n prev_b_embedding, x.shape[-2:], mode='bilinear', align_corners=True)\n x = x + prev_b_embedding\n\n A = self._net(x)\n eps = 1e-3\n A = A + eps\n n, c, h, w = A.shape\n A = A.view(n, self.n_attractors, 2, h, w)\n A_normed = A / A.sum(dim=2, keepdim=True) # n, a, 2, h, w\n A_normed = A[:, :, 0, ...] 
# n, na, h, w\n\n b_prev = nn.functional.interpolate(\n b_prev, (h, w), mode='bilinear', align_corners=True)\n b_centers = b_prev\n\n if self.attractor_type == 'exp':\n dist = exp_attractor\n else:\n dist = inv_attractor\n\n if not self.memory_efficient:\n func = {'mean': torch.mean, 'sum': torch.sum}[self.kind]\n # .shape N, nbins, h, w\n delta_c = func(dist(A_normed.unsqueeze(\n 2) - b_centers.unsqueeze(1)), dim=1)\n else:\n delta_c = torch.zeros_like(b_centers, device=b_centers.device)\n for i in range(self.n_attractors):\n # .shape N, nbins, h, w\n delta_c += dist(A_normed[:, i, ...].unsqueeze(1) - b_centers)\n\n if self.kind == 'mean':\n delta_c = delta_c / self.n_attractors\n\n b_new_centers = b_centers + delta_c\n B_centers = (self.max_depth - self.min_depth) * \\\n b_new_centers + self.min_depth\n B_centers, _ = torch.sort(B_centers, dim=1)\n B_centers = torch.clip(B_centers, self.min_depth, self.max_depth)\n return b_new_centers, B_centers" }, { "identifier": "AttractorLayerUnnormed", "path": "src/deforum/models/depth_models/zoedepth/models/layers/attractor.py", "snippet": "class AttractorLayerUnnormed(nn.Module):\n def __init__(self, in_features, n_bins, n_attractors=16, mlp_dim=128, min_depth=1e-3, max_depth=10,\n alpha=300, gamma=2, kind='sum', attractor_type='exp', memory_efficient=False):\n \"\"\"\n Attractor layer for bin centers. Bin centers are unbounded\n \"\"\"\n super().__init__()\n\n self.n_attractors = n_attractors\n self.n_bins = n_bins\n self.min_depth = min_depth\n self.max_depth = max_depth\n self.alpha = alpha\n self.gamma = gamma\n self.kind = kind\n self.attractor_type = attractor_type\n self.memory_efficient = memory_efficient\n\n self._net = nn.Sequential(\n nn.Conv2d(in_features, mlp_dim, 1, 1, 0),\n nn.ReLU(inplace=True),\n nn.Conv2d(mlp_dim, n_attractors, 1, 1, 0),\n nn.Softplus()\n )\n\n def forward(self, x, b_prev, prev_b_embedding=None, interpolate=True, is_for_query=False):\n \"\"\"\n Args:\n x (torch.Tensor) : feature block; shape - n, c, h, w\n b_prev (torch.Tensor) : previous bin centers normed; shape - n, prev_nbins, h, w\n \n Returns:\n tuple(torch.Tensor,torch.Tensor) : new bin centers unbounded; shape - n, nbins, h, w. 
Two outputs just to keep the API consistent with the normed version\n \"\"\"\n if prev_b_embedding is not None:\n if interpolate:\n prev_b_embedding = nn.functional.interpolate(\n prev_b_embedding, x.shape[-2:], mode='bilinear', align_corners=True)\n x = x + prev_b_embedding\n\n A = self._net(x)\n n, c, h, w = A.shape\n\n b_prev = nn.functional.interpolate(\n b_prev, (h, w), mode='bilinear', align_corners=True)\n b_centers = b_prev\n\n if self.attractor_type == 'exp':\n dist = exp_attractor\n else:\n dist = inv_attractor\n\n if not self.memory_efficient:\n func = {'mean': torch.mean, 'sum': torch.sum}[self.kind]\n # .shape N, nbins, h, w\n delta_c = func(\n dist(A.unsqueeze(2) - b_centers.unsqueeze(1)), dim=1)\n else:\n delta_c = torch.zeros_like(b_centers, device=b_centers.device)\n for i in range(self.n_attractors):\n delta_c += dist(A[:, i, ...].unsqueeze(1) -\n b_centers) # .shape N, nbins, h, w\n\n if self.kind == 'mean':\n delta_c = delta_c / self.n_attractors\n\n b_new_centers = b_centers + delta_c\n B_centers = b_new_centers\n\n return b_new_centers, B_centers" }, { "identifier": "ConditionalLogBinomial", "path": "src/deforum/models/depth_models/zoedepth/models/layers/dist_layers.py", "snippet": "class ConditionalLogBinomial(nn.Module):\n def __init__(self, in_features, condition_dim, n_classes=256, bottleneck_factor=2, p_eps=1e-4, max_temp=50, min_temp=1e-7, act=torch.softmax):\n \"\"\"Conditional Log Binomial distribution\n\n Args:\n in_features (int): number of input channels in main feature\n condition_dim (int): number of input channels in condition feature\n n_classes (int, optional): Number of classes. Defaults to 256.\n bottleneck_factor (int, optional): Hidden dim factor. Defaults to 2.\n p_eps (float, optional): small eps value. Defaults to 1e-4.\n max_temp (float, optional): Maximum temperature of output distribution. Defaults to 50.\n min_temp (float, optional): Minimum temperature of output distribution. Defaults to 1e-7.\n \"\"\"\n super().__init__()\n self.p_eps = p_eps\n self.max_temp = max_temp\n self.min_temp = min_temp\n self.log_binomial_transform = LogBinomial(n_classes, act=act)\n bottleneck = (in_features + condition_dim) // bottleneck_factor\n self.mlp = nn.Sequential(\n nn.Conv2d(in_features + condition_dim, bottleneck,\n kernel_size=1, stride=1, padding=0),\n nn.GELU(),\n # 2 for p linear norm, 2 for t linear norm\n nn.Conv2d(bottleneck, 2+2, kernel_size=1, stride=1, padding=0),\n nn.Softplus()\n )\n\n def forward(self, x, cond):\n \"\"\"Forward pass\n\n Args:\n x (torch.Tensor - NCHW): Main feature\n cond (torch.Tensor - NCHW): condition feature\n\n Returns:\n torch.Tensor: Output log binomial distribution\n \"\"\"\n pt = self.mlp(torch.concat((x, cond), dim=1))\n p, t = pt[:, :2, ...], pt[:, 2:, ...]\n\n p = p + self.p_eps\n p = p[:, 0, ...] / (p[:, 0, ...] + p[:, 1, ...])\n\n t = t + self.p_eps\n t = t[:, 0, ...] / (t[:, 0, ...] + t[:, 1, ...])\n t = t.unsqueeze(1)\n t = (self.max_temp - self.min_temp) * t + self.min_temp\n\n return self.log_binomial_transform(p, t)" }, { "identifier": "Projector", "path": "src/deforum/models/depth_models/zoedepth/models/layers/localbins_layers.py", "snippet": "class Projector(nn.Module):\n def __init__(self, in_features, out_features, mlp_dim=128):\n \"\"\"Projector MLP\n\n Args:\n in_features (int): input channels\n out_features (int): output channels\n mlp_dim (int, optional): hidden dimension. 
Defaults to 128.\n \"\"\"\n super().__init__()\n\n self._net = nn.Sequential(\n nn.Conv2d(in_features, mlp_dim, 1, 1, 0),\n nn.ReLU(inplace=True),\n nn.Conv2d(mlp_dim, out_features, 1, 1, 0),\n )\n\n def forward(self, x):\n return self._net(x)" }, { "identifier": "SeedBinRegressor", "path": "src/deforum/models/depth_models/zoedepth/models/layers/localbins_layers.py", "snippet": "class SeedBinRegressor(nn.Module):\n def __init__(self, in_features, n_bins=16, mlp_dim=256, min_depth=1e-3, max_depth=10):\n \"\"\"Bin center regressor network. Bin centers are bounded on (min_depth, max_depth) interval.\n\n Args:\n in_features (int): input channels\n n_bins (int, optional): Number of bin centers. Defaults to 16.\n mlp_dim (int, optional): Hidden dimension. Defaults to 256.\n min_depth (float, optional): Min depth value. Defaults to 1e-3.\n max_depth (float, optional): Max depth value. Defaults to 10.\n \"\"\"\n super().__init__()\n self.version = \"1_1\"\n self.min_depth = min_depth\n self.max_depth = max_depth\n\n self._net = nn.Sequential(\n nn.Conv2d(in_features, mlp_dim, 1, 1, 0),\n nn.ReLU(inplace=True),\n nn.Conv2d(mlp_dim, n_bins, 1, 1, 0),\n nn.ReLU(inplace=True)\n )\n\n def forward(self, x):\n \"\"\"\n Returns tensor of bin_width vectors (centers). One vector b for every pixel\n \"\"\"\n B = self._net(x)\n eps = 1e-3\n B = B + eps\n B_widths_normed = B / B.sum(dim=1, keepdim=True)\n B_widths = (self.max_depth - self.min_depth) * \\\n B_widths_normed # .shape NCHW\n # pad has the form (left, right, top, bottom, front, back)\n B_widths = nn.functional.pad(\n B_widths, (0, 0, 0, 0, 1, 0), mode='constant', value=self.min_depth)\n B_edges = torch.cumsum(B_widths, dim=1) # .shape NCHW\n\n B_centers = 0.5 * (B_edges[:, :-1, ...] + B_edges[:, 1:, ...])\n return B_widths_normed, B_centers" }, { "identifier": "SeedBinRegressorUnnormed", "path": "src/deforum/models/depth_models/zoedepth/models/layers/localbins_layers.py", "snippet": "class SeedBinRegressorUnnormed(nn.Module):\n def __init__(self, in_features, n_bins=16, mlp_dim=256, min_depth=1e-3, max_depth=10):\n \"\"\"Bin center regressor network. Bin centers are unbounded\n\n Args:\n in_features (int): input channels\n n_bins (int, optional): Number of bin centers. Defaults to 16.\n mlp_dim (int, optional): Hidden dimension. Defaults to 256.\n min_depth (float, optional): Not used. (for compatibility with SeedBinRegressor)\n max_depth (float, optional): Not used. (for compatibility with SeedBinRegressor)\n \"\"\"\n super().__init__()\n self.version = \"1_1\"\n self._net = nn.Sequential(\n nn.Conv2d(in_features, mlp_dim, 1, 1, 0),\n nn.ReLU(inplace=True),\n nn.Conv2d(mlp_dim, n_bins, 1, 1, 0),\n nn.Softplus()\n )\n\n def forward(self, x):\n \"\"\"\n Returns tensor of bin_width vectors (centers). One vector b for every pixel\n \"\"\"\n B_centers = self._net(x)\n return B_centers, B_centers" }, { "identifier": "PatchTransformerEncoder", "path": "src/deforum/models/depth_models/zoedepth/models/layers/patch_transformer.py", "snippet": "class PatchTransformerEncoder(nn.Module):\n def __init__(self, in_channels, patch_size=10, embedding_dim=128, num_heads=4, use_class_token=False):\n \"\"\"ViT-like transformer block\n\n Args:\n in_channels (int): Input channels\n patch_size (int, optional): patch size. Defaults to 10.\n embedding_dim (int, optional): Embedding dimension in transformer model. Defaults to 128.\n num_heads (int, optional): number of attention heads. 
Defaults to 4.\n use_class_token (bool, optional): Whether to use extra token at the start for global accumulation (called as \"class token\"). Defaults to False.\n \"\"\"\n super(PatchTransformerEncoder, self).__init__()\n self.use_class_token = use_class_token\n encoder_layers = nn.TransformerEncoderLayer(\n embedding_dim, num_heads, dim_feedforward=1024)\n self.transformer_encoder = nn.TransformerEncoder(\n encoder_layers, num_layers=4) # takes shape S,N,E\n\n self.embedding_convPxP = nn.Conv2d(in_channels, embedding_dim,\n kernel_size=patch_size, stride=patch_size, padding=0)\n \n def positional_encoding_1d(self, sequence_length, batch_size, embedding_dim, device='cpu'):\n \"\"\"Generate positional encodings\n\n Args:\n sequence_length (int): Sequence length\n embedding_dim (int): Embedding dimension\n\n Returns:\n torch.Tensor SBE: Positional encodings\n \"\"\"\n position = torch.arange(\n 0, sequence_length, dtype=torch.float32, device=device).unsqueeze(1)\n index = torch.arange(\n 0, embedding_dim, 2, dtype=torch.float32, device=device).unsqueeze(0)\n div_term = torch.exp(index * (-torch.log(torch.tensor(10000.0, device=device)) / embedding_dim))\n pos_encoding = position * div_term\n pos_encoding = torch.cat([torch.sin(pos_encoding), torch.cos(pos_encoding)], dim=1)\n pos_encoding = pos_encoding.unsqueeze(1).repeat(1, batch_size, 1)\n return pos_encoding\n \n\n def forward(self, x):\n \"\"\"Forward pass\n\n Args:\n x (torch.Tensor - NCHW): Input feature tensor\n\n Returns:\n torch.Tensor - SNE: Transformer output embeddings. S - sequence length (=HW/patch_size^2), N - batch size, E - embedding dim\n \"\"\"\n embeddings = self.embedding_convPxP(x).flatten(\n 2) # .shape = n,c,s = n, embedding_dim, s\n if self.use_class_token:\n # extra special token at start ?\n embeddings = nn.functional.pad(embeddings, (1, 0))\n \n # change to S,N,E format required by transformer\n embeddings = embeddings.permute(2, 0, 1)\n S, N, E = embeddings.shape\n embeddings = embeddings + self.positional_encoding_1d(S, N, E, device=embeddings.device)\n x = self.transformer_encoder(embeddings) # .shape = S, N, E\n return x" }, { "identifier": "load_state_from_resource", "path": "src/deforum/models/depth_models/zoedepth/models/model_io.py", "snippet": "def load_state_from_resource(model, resource: str):\n \"\"\"Loads weights to the model from a given resource. A resource can be of following types:\n 1. URL. Prefixed with \"url::\"\n e.g. url::http(s)://url.resource.com/ckpt.pt\n\n 2. Local path. Prefixed with \"local::\"\n e.g. local::/path/to/ckpt.pt\n\n\n Args:\n model (torch.nn.Module): Model\n resource (str): resource string\n\n Returns:\n torch.nn.Module: Model with loaded weights\n \"\"\"\n print(f\"Using pretrained resource {resource}\")\n\n if resource.startswith('url::'):\n url = resource.split('url::')[1]\n return load_state_dict_from_url(model, url, progress=True)\n\n elif resource.startswith('local::'):\n path = resource.split('local::')[1]\n return load_wts(model, path)\n \n else:\n raise ValueError(\"Invalid resource type, only url:: and local:: are supported\")" } ]
import itertools import torch import torch.nn as nn from ..depth_model import DepthModel from ..base_models.midas import MidasCore from ..layers.attractor import AttractorLayer, AttractorLayerUnnormed from ..layers.dist_layers import ConditionalLogBinomial from ..layers.localbins_layers import (Projector, SeedBinRegressor, SeedBinRegressorUnnormed) from ..layers.patch_transformer import PatchTransformerEncoder from ..model_io import load_state_from_resource
11156
max_depth = conf['max_depth'] seed_bin_regressor = self.seed_bin_regressors[bin_conf_name] _, seed_b_centers = seed_bin_regressor(x) if self.bin_centers_type == 'normed' or self.bin_centers_type == 'hybrid2': b_prev = (seed_b_centers - min_depth) / (max_depth - min_depth) else: b_prev = seed_b_centers prev_b_embedding = self.seed_projector(x) attractors = self.attractors[bin_conf_name] for projector, attractor, x in zip(self.projectors, attractors, x_blocks): b_embedding = projector(x) b, b_centers = attractor( b_embedding, b_prev, prev_b_embedding, interpolate=True) b_prev = b prev_b_embedding = b_embedding last = outconv_activation b_centers = nn.functional.interpolate( b_centers, last.shape[-2:], mode='bilinear', align_corners=True) b_embedding = nn.functional.interpolate( b_embedding, last.shape[-2:], mode='bilinear', align_corners=True) clb = self.conditional_log_binomial[bin_conf_name] x = clb(last, b_embedding) # Now depth value is Sum px * cx , where cx are bin_centers from the last bin tensor # print(x.shape, b_centers.shape) # b_centers = nn.functional.interpolate(b_centers, x.shape[-2:], mode='bilinear', align_corners=True) out = torch.sum(x * b_centers, dim=1, keepdim=True) output = dict(domain_logits=domain_logits, metric_depth=out) if return_final_centers or return_probs: output['bin_centers'] = b_centers if return_probs: output['probs'] = x return output def get_lr_params(self, lr): """ Learning rate configuration for different layers of the model Args: lr (float) : Base learning rate Returns: list : list of parameters to optimize and their learning rates, in the format required by torch optimizers. """ param_conf = [] if self.train_midas: def get_rel_pos_params(): for name, p in self.core.core.pretrained.named_parameters(): if "relative_position" in name: yield p def get_enc_params_except_rel_pos(): for name, p in self.core.core.pretrained.named_parameters(): if "relative_position" not in name: yield p encoder_params = get_enc_params_except_rel_pos() rel_pos_params = get_rel_pos_params() midas_params = self.core.core.scratch.parameters() midas_lr_factor = self.midas_lr_factor if self.is_midas_pretrained else 1.0 param_conf.extend([ {'params': encoder_params, 'lr': lr / self.encoder_lr_factor}, {'params': rel_pos_params, 'lr': lr / self.pos_enc_lr_factor}, {'params': midas_params, 'lr': lr / midas_lr_factor} ]) remaining_modules = [] for name, child in self.named_children(): if name != 'core': remaining_modules.append(child) remaining_params = itertools.chain( *[child.parameters() for child in remaining_modules]) param_conf.append({'params': remaining_params, 'lr': lr}) return param_conf def get_conf_parameters(self, conf_name): """ Returns parameters of all the ModuleDicts children that are exclusively used for the given bin configuration """ params = [] for name, child in self.named_children(): if isinstance(child, nn.ModuleDict): for bin_conf_name, module in child.items(): if bin_conf_name == conf_name: params += list(module.parameters()) return params def freeze_conf(self, conf_name): """ Freezes all the parameters of all the ModuleDicts children that are exclusively used for the given bin configuration """ for p in self.get_conf_parameters(conf_name): p.requires_grad = False def unfreeze_conf(self, conf_name): """ Unfreezes all the parameters of all the ModuleDicts children that are exclusively used for the given bin configuration """ for p in self.get_conf_parameters(conf_name): p.requires_grad = True def freeze_all_confs(self): """ Freezes all the parameters of all 
the ModuleDicts children """ for name, child in self.named_children(): if isinstance(child, nn.ModuleDict): for bin_conf_name, module in child.items(): for p in module.parameters(): p.requires_grad = False @staticmethod def build(midas_model_type="DPT_BEiT_L_384", pretrained_resource=None, use_pretrained_midas=False, train_midas=False, freeze_midas_bn=True, **kwargs):
# MIT License # Copyright (c) 2022 Intelligent Systems Lab Org # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # File author: Shariq Farooq Bhat class ZoeDepthNK(DepthModel): def __init__(self, core, bin_conf, bin_centers_type="softplus", bin_embedding_dim=128, n_attractors=[16, 8, 4, 1], attractor_alpha=300, attractor_gamma=2, attractor_kind='sum', attractor_type='exp', min_temp=5, max_temp=50, memory_efficient=False, train_midas=True, is_midas_pretrained=True, midas_lr_factor=1, encoder_lr_factor=10, pos_enc_lr_factor=10, inverse_midas=False, **kwargs): """ZoeDepthNK model. This is the version of ZoeDepth that has two metric heads and uses a learned router to route to experts. Args: core (models.base_models.midas.MidasCore): The base midas model that is used for extraction of "relative" features bin_conf (List[dict]): A list of dictionaries that contain the bin configuration for each metric head. Each dictionary should contain the following keys: "name" (str, typically same as the dataset name), "n_bins" (int), "min_depth" (float), "max_depth" (float) The length of this list determines the number of metric heads. bin_centers_type (str, optional): "normed" or "softplus". Activation type used for bin centers. For "normed" bin centers, linear normalization trick is applied. This results in bounded bin centers. For "softplus", softplus activation is used and thus are unbounded. Defaults to "normed". bin_embedding_dim (int, optional): bin embedding dimension. Defaults to 128. n_attractors (List[int], optional): Number of bin attractors at decoder layers. Defaults to [16, 8, 4, 1]. attractor_alpha (int, optional): Proportional attractor strength. Refer to models.layers.attractor for more details. Defaults to 300. attractor_gamma (int, optional): Exponential attractor strength. Refer to models.layers.attractor for more details. Defaults to 2. attractor_kind (str, optional): Attraction aggregation "sum" or "mean". Defaults to 'sum'. attractor_type (str, optional): Type of attractor to use; "inv" (Inverse attractor) or "exp" (Exponential attractor). Defaults to 'exp'. min_temp (int, optional): Lower bound for temperature of output probability distribution. Defaults to 5. max_temp (int, optional): Upper bound for temperature of output probability distribution. Defaults to 50. memory_efficient (bool, optional): Whether to use memory efficient version of attractor layers. Memory efficient version is slower but is recommended incase of multiple metric heads in order save GPU memory. Defaults to False. 
train_midas (bool, optional): Whether to train "core", the base midas model. Defaults to True. is_midas_pretrained (bool, optional): Is "core" pretrained? Defaults to True. midas_lr_factor (int, optional): Learning rate reduction factor for base midas model except its encoder and positional encodings. Defaults to 10. encoder_lr_factor (int, optional): Learning rate reduction factor for the encoder in midas model. Defaults to 10. pos_enc_lr_factor (int, optional): Learning rate reduction factor for positional encodings in the base midas model. Defaults to 10. """ super().__init__() self.core = core self.bin_conf = bin_conf self.min_temp = min_temp self.max_temp = max_temp self.memory_efficient = memory_efficient self.train_midas = train_midas self.is_midas_pretrained = is_midas_pretrained self.midas_lr_factor = midas_lr_factor self.encoder_lr_factor = encoder_lr_factor self.pos_enc_lr_factor = pos_enc_lr_factor self.inverse_midas = inverse_midas N_MIDAS_OUT = 32 btlnck_features = self.core.output_channels[0] num_out_features = self.core.output_channels[1:] # self.scales = [16, 8, 4, 2] # spatial scale factors self.conv2 = nn.Conv2d( btlnck_features, btlnck_features, kernel_size=1, stride=1, padding=0) # Transformer classifier on the bottleneck self.patch_transformer = PatchTransformerEncoder( btlnck_features, 1, 128, use_class_token=True) self.mlp_classifier = nn.Sequential( nn.Linear(128, 128), nn.ReLU(), nn.Linear(128, 2) ) if bin_centers_type == "normed": SeedBinRegressorLayer = SeedBinRegressor Attractor = AttractorLayer elif bin_centers_type == "softplus": SeedBinRegressorLayer = SeedBinRegressorUnnormed Attractor = AttractorLayerUnnormed elif bin_centers_type == "hybrid1": SeedBinRegressorLayer = SeedBinRegressor Attractor = AttractorLayerUnnormed elif bin_centers_type == "hybrid2": SeedBinRegressorLayer = SeedBinRegressorUnnormed Attractor = AttractorLayer else: raise ValueError( "bin_centers_type should be one of 'normed', 'softplus', 'hybrid1', 'hybrid2'") self.bin_centers_type = bin_centers_type # We have bins for each bin conf. # Create a map (ModuleDict) of 'name' -> seed_bin_regressor self.seed_bin_regressors = nn.ModuleDict( {conf['name']: SeedBinRegressorLayer(btlnck_features, conf["n_bins"], mlp_dim=bin_embedding_dim // 2, min_depth=conf["min_depth"], max_depth=conf["max_depth"]) for conf in bin_conf} ) self.seed_projector = Projector( btlnck_features, bin_embedding_dim, mlp_dim=bin_embedding_dim // 2) self.projectors = nn.ModuleList([ Projector(num_out, bin_embedding_dim, mlp_dim=bin_embedding_dim // 2) for num_out in num_out_features ]) # Create a map (ModuleDict) of 'name' -> attractors (ModuleList) self.attractors = nn.ModuleDict( {conf['name']: nn.ModuleList([ Attractor(bin_embedding_dim, n_attractors[i], mlp_dim=bin_embedding_dim, alpha=attractor_alpha, gamma=attractor_gamma, kind=attractor_kind, attractor_type=attractor_type, memory_efficient=memory_efficient, min_depth=conf["min_depth"], max_depth=conf["max_depth"]) for i in range(len(n_attractors)) ]) for conf in bin_conf} ) last_in = N_MIDAS_OUT # conditional log binomial for each bin conf self.conditional_log_binomial = nn.ModuleDict( {conf['name']: ConditionalLogBinomial(last_in, bin_embedding_dim, conf['n_bins'], bottleneck_factor=4, min_temp=self.min_temp, max_temp=self.max_temp) for conf in bin_conf} ) def forward(self, x, return_final_centers=False, denorm=False, return_probs=False, **kwargs): """ Args: x (torch.Tensor): Input image tensor of shape (B, C, H, W). Assumes all images are from the same domain. 
return_final_centers (bool, optional): Whether to return the final centers of the attractors. Defaults to False. denorm (bool, optional): Whether to denormalize the input image. Defaults to False. return_probs (bool, optional): Whether to return the probabilities of the bins. Defaults to False. Returns: dict: Dictionary of outputs with keys: - "rel_depth": Relative depth map of shape (B, 1, H, W) - "metric_depth": Metric depth map of shape (B, 1, H, W) - "domain_logits": Domain logits of shape (B, 2) - "bin_centers": Bin centers of shape (B, N, H, W). Present only if return_final_centers is True - "probs": Bin probabilities of shape (B, N, H, W). Present only if return_probs is True """ b, c, h, w = x.shape self.orig_input_width = w self.orig_input_height = h rel_depth, out = self.core(x, denorm=denorm, return_rel_depth=True) outconv_activation = out[0] btlnck = out[1] x_blocks = out[2:] x_d0 = self.conv2(btlnck) x = x_d0 # Predict which path to take embedding = self.patch_transformer(x)[0] # N, E domain_logits = self.mlp_classifier(embedding) # N, 2 domain_vote = torch.softmax(domain_logits.sum( dim=0, keepdim=True), dim=-1) # 1, 2 # Get the path bin_conf_name = ["nyu", "kitti"][torch.argmax( domain_vote, dim=-1).squeeze().item()] try: conf = [c for c in self.bin_conf if c["name"] == bin_conf_name][0] except IndexError: raise ValueError( f"bin_conf_name {bin_conf_name} not found in bin_confs") min_depth = conf['min_depth'] max_depth = conf['max_depth'] seed_bin_regressor = self.seed_bin_regressors[bin_conf_name] _, seed_b_centers = seed_bin_regressor(x) if self.bin_centers_type == 'normed' or self.bin_centers_type == 'hybrid2': b_prev = (seed_b_centers - min_depth) / (max_depth - min_depth) else: b_prev = seed_b_centers prev_b_embedding = self.seed_projector(x) attractors = self.attractors[bin_conf_name] for projector, attractor, x in zip(self.projectors, attractors, x_blocks): b_embedding = projector(x) b, b_centers = attractor( b_embedding, b_prev, prev_b_embedding, interpolate=True) b_prev = b prev_b_embedding = b_embedding last = outconv_activation b_centers = nn.functional.interpolate( b_centers, last.shape[-2:], mode='bilinear', align_corners=True) b_embedding = nn.functional.interpolate( b_embedding, last.shape[-2:], mode='bilinear', align_corners=True) clb = self.conditional_log_binomial[bin_conf_name] x = clb(last, b_embedding) # Now depth value is Sum px * cx , where cx are bin_centers from the last bin tensor # print(x.shape, b_centers.shape) # b_centers = nn.functional.interpolate(b_centers, x.shape[-2:], mode='bilinear', align_corners=True) out = torch.sum(x * b_centers, dim=1, keepdim=True) output = dict(domain_logits=domain_logits, metric_depth=out) if return_final_centers or return_probs: output['bin_centers'] = b_centers if return_probs: output['probs'] = x return output def get_lr_params(self, lr): """ Learning rate configuration for different layers of the model Args: lr (float) : Base learning rate Returns: list : list of parameters to optimize and their learning rates, in the format required by torch optimizers. 
""" param_conf = [] if self.train_midas: def get_rel_pos_params(): for name, p in self.core.core.pretrained.named_parameters(): if "relative_position" in name: yield p def get_enc_params_except_rel_pos(): for name, p in self.core.core.pretrained.named_parameters(): if "relative_position" not in name: yield p encoder_params = get_enc_params_except_rel_pos() rel_pos_params = get_rel_pos_params() midas_params = self.core.core.scratch.parameters() midas_lr_factor = self.midas_lr_factor if self.is_midas_pretrained else 1.0 param_conf.extend([ {'params': encoder_params, 'lr': lr / self.encoder_lr_factor}, {'params': rel_pos_params, 'lr': lr / self.pos_enc_lr_factor}, {'params': midas_params, 'lr': lr / midas_lr_factor} ]) remaining_modules = [] for name, child in self.named_children(): if name != 'core': remaining_modules.append(child) remaining_params = itertools.chain( *[child.parameters() for child in remaining_modules]) param_conf.append({'params': remaining_params, 'lr': lr}) return param_conf def get_conf_parameters(self, conf_name): """ Returns parameters of all the ModuleDicts children that are exclusively used for the given bin configuration """ params = [] for name, child in self.named_children(): if isinstance(child, nn.ModuleDict): for bin_conf_name, module in child.items(): if bin_conf_name == conf_name: params += list(module.parameters()) return params def freeze_conf(self, conf_name): """ Freezes all the parameters of all the ModuleDicts children that are exclusively used for the given bin configuration """ for p in self.get_conf_parameters(conf_name): p.requires_grad = False def unfreeze_conf(self, conf_name): """ Unfreezes all the parameters of all the ModuleDicts children that are exclusively used for the given bin configuration """ for p in self.get_conf_parameters(conf_name): p.requires_grad = True def freeze_all_confs(self): """ Freezes all the parameters of all the ModuleDicts children """ for name, child in self.named_children(): if isinstance(child, nn.ModuleDict): for bin_conf_name, module in child.items(): for p in module.parameters(): p.requires_grad = False @staticmethod def build(midas_model_type="DPT_BEiT_L_384", pretrained_resource=None, use_pretrained_midas=False, train_midas=False, freeze_midas_bn=True, **kwargs):
core = MidasCore.build(midas_model_type=midas_model_type, use_pretrained_midas=use_pretrained_midas,
1
2023-10-28 14:23:27+00:00
16k
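ZoeDepthNK.forward() above routes an entire batch to one metric head by summing the per-image domain logits, softmaxing the sum into a single vote, and taking the argmax over the ("nyu", "kitti") bin configurations. The following is a small, self-contained sketch of just that routing step; the logits are made-up values for illustration.

import torch

def route_bin_conf(domain_logits: torch.Tensor, conf_names=("nyu", "kitti")) -> str:
    # domain_logits: (N, 2), one row of logits per image in the batch
    domain_vote = torch.softmax(domain_logits.sum(dim=0, keepdim=True), dim=-1)  # (1, 2)
    return conf_names[torch.argmax(domain_vote, dim=-1).squeeze().item()]

if __name__ == "__main__":
    fake_logits = torch.tensor([[2.1, 0.3], [1.7, 0.9], [0.2, 0.4]])  # made-up logits
    print(route_bin_conf(fake_logits))  # "nyu": the whole batch goes to the NYU head

Voting at the batch level rather than per image follows from the forward() docstring's assumption that all images in a batch come from the same domain.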
samholt/ActiveObservingInContinuous-timeControl
mppi_dataset_collector.py
[ { "identifier": "dotdict", "path": "config.py", "snippet": "class dotdict(dict):\n \"\"\"dot.notation access to dictionary attributes\"\"\"\n\n __getattr__ = dict.get\n __setattr__ = dict.__setitem__\n __delattr__ = dict.__delitem__" }, { "identifier": "create_env", "path": "overlay.py", "snippet": "def create_env(env_name, dt=0.05, ts_grid=\"fixed\", noise=0.0, friction=False, device=device):\n if \"oderl\" in env_name:\n env = create_oderl_env(env_name, dt=dt, ts_grid=ts_grid, noise=noise, friction=friction, device=device)\n else:\n env = gym.make(env_name)\n return env" }, { "identifier": "setup_logger", "path": "overlay.py", "snippet": "def setup_logger(file, log_folder=\"logs\", return_path_to_log=False):\n import logging\n import os\n import time\n\n file_name = os.path.basename(os.path.realpath(file)).split(\".py\")[0]\n from pathlib import Path\n\n Path(f\"./{log_folder}\").mkdir(parents=True, exist_ok=True)\n path_run_name = \"{}-{}\".format(file_name, time.strftime(\"%Y%m%d-%H%M%S\"))\n logging.basicConfig(\n format=\"%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s\",\n handlers=[\n logging.FileHandler(f\"{log_folder}/{path_run_name}_log.txt\"),\n logging.StreamHandler(),\n ],\n datefmt=\"%H:%M:%S\",\n level=logging.INFO,\n )\n logger = logging.getLogger()\n logger.info(f\"Starting: Log file at: {log_folder}/{path_run_name}_log.txt\")\n if return_path_to_log:\n return logger, f\"{log_folder}/{path_run_name}_log.txt\"\n else:\n return logger" }, { "identifier": "start_virtual_display", "path": "overlay.py", "snippet": "def start_virtual_display():\n import pyvirtualdisplay\n\n return pyvirtualdisplay.Display(visible=0, size=(1400, 900)).start()" }, { "identifier": "step_env", "path": "overlay.py", "snippet": "def step_env(env, action, obs_noise):\n at = torch.from_numpy(action).to(device)\n\n def g(state, t):\n return at\n\n returns = env.integrate_system(2, g, s0=torch.tensor(env.state).to(device), return_states=True)\n state = returns[-1][-1]\n reward = returns[2][-1]\n tsn = returns[-2][-1, -1]\n env.set_state_(state.cpu().numpy())\n state_out = env.get_obs()\n state_out = torch.from_numpy(state_out).to(device)\n state_out += torch.randn_like(state_out) * obs_noise\n env.time_step += 1\n done = True if env.time_step >= env.n_steps else False\n state_out = state_out.cpu().numpy()\n return state_out, reward, done, tsn" }, { "identifier": "MPPI", "path": "planners/mppi.py", "snippet": "class MPPI:\n \"\"\"\n Model Predictive Path Integral control\n This implementation batch samples the trajectories and so scales well with the number of samples K.\n\n Implemented according to algorithm 2 in Williams et al., 2017\n 'Information Theoretic MPC for Model-Based Reinforcement Learning',\n based off of https://github.com/ferreirafabio/mppi_pendulum\n \"\"\"\n\n def __init__(\n self,\n dynamics,\n running_cost,\n nx,\n noise_sigma,\n num_samples=100,\n horizon=15,\n device=\"cuda:0\",\n terminal_state_cost=None,\n lambda_=1.0,\n noise_mu=None,\n u_min=None,\n u_max=None,\n u_init=None,\n U_init=None,\n u_scale=1,\n u_per_command=1,\n step_dependent_dynamics=False,\n rollout_samples=1, # Ensemble size\n rollout_var_cost=0,\n rollout_var_discount=0.95,\n dt=0.05,\n sample_null_action=False,\n noise_abs_cost=False,\n ):\n \"\"\"\n :param dynamics: function(state, action) -> next_state (K x nx) taking in batch state (K x nx) and action (K x nu)\n :param running_cost: function(state, action) -> cost (K) taking in batch state and action (same as dynamics)\n :param nx: state 
dimension\n :param noise_sigma: (nu x nu) control noise covariance (assume v_t ~ N(u_t, noise_sigma))\n :param num_samples: K, number of trajectories to sample\n :param horizon: T, length of each trajectory\n :param device: pytorch device\n :param terminal_state_cost: function(state) -> cost (K x 1) taking in batch state\n :param lambda_: temperature, positive scalar where larger values will allow more exploration\n :param noise_mu: (nu) control noise mean (used to bias control samples); defaults to zero mean\n :param u_min: (nu) minimum values for each dimension of control to pass into dynamics\n :param u_max: (nu) maximum values for each dimension of control to pass into dynamics\n :param u_init: (nu) what to initialize new end of trajectory control to be; defeaults to zero\n :param U_init: (T x nu) initial control sequence; defaults to noise\n :param step_dependent_dynamics: whether the passed in dynamics needs horizon step passed in (as 3rd arg)\n :param rollout_samples: M, number of state trajectories to rollout for each control trajectory\n (should be 1 for deterministic dynamics and more for models that output a distribution)\n :param rollout_var_cost: Cost attached to the variance of costs across trajectory rollouts\n :param rollout_var_discount: Discount of variance cost over control horizon\n :param sample_null_action: Whether to explicitly sample a null action (bad for starting in a local minima)\n :param noise_abs_cost: Whether to use the absolute value of the action noise to avoid bias when all states have the same cost\n \"\"\"\n self.d = device\n self.dtype = noise_sigma.dtype\n self.K = num_samples # N_SAMPLES\n self.T = horizon # TIMESTEPS\n self.dt = dt\n\n # dimensions of state and control\n self.nx = nx\n self.nu = 1 if len(noise_sigma.shape) == 0 else noise_sigma.shape[0]\n self.lambda_ = lambda_\n\n if noise_mu is None:\n noise_mu = torch.zeros(self.nu, dtype=self.dtype)\n\n if u_init is None:\n u_init = torch.zeros_like(noise_mu)\n\n # handle 1D edge case\n if self.nu == 1:\n noise_mu = noise_mu.view(-1)\n noise_sigma = noise_sigma.view(-1, 1)\n\n # bounds\n self.u_min = u_min\n self.u_max = u_max\n self.u_scale = u_scale\n self.u_per_command = u_per_command\n # make sure if any of them is specified, both are specified\n if self.u_max is not None and self.u_min is None:\n if not torch.is_tensor(self.u_max):\n self.u_max = torch.tensor(self.u_max)\n self.u_min = -self.u_max\n if self.u_min is not None and self.u_max is None:\n if not torch.is_tensor(self.u_min):\n self.u_min = torch.tensor(self.u_min)\n self.u_max = -self.u_min\n if self.u_min is not None:\n self.u_min = self.u_min.to(device=self.d)\n self.u_max = self.u_max.to(device=self.d)\n\n self.noise_mu = noise_mu.to(self.d)\n self.noise_sigma = noise_sigma.to(self.d)\n self.noise_sigma_inv = torch.inverse(self.noise_sigma)\n self.noise_dist = MultivariateNormal(self.noise_mu, covariance_matrix=self.noise_sigma)\n # T x nu control sequence\n self.U = U_init\n self.u_init = u_init.to(self.d)\n\n if self.U is None:\n self.U = self.noise_dist.sample((self.T,))\n\n self.step_dependency = step_dependent_dynamics\n self.F = dynamics\n self.running_cost = running_cost\n self.terminal_state_cost = terminal_state_cost\n self.sample_null_action = sample_null_action\n self.noise_abs_cost = noise_abs_cost\n self.state = None\n\n # handling dynamics models that output a distribution (take multiple trajectory samples)\n self.M = rollout_samples\n self.rollout_var_cost = rollout_var_cost\n self.rollout_var_discount = 
rollout_var_discount\n\n # sampled results from last command\n self.cost_total = None\n self.cost_total_non_zero = None\n self.omega = None\n self.states_mu = None\n self.states_var = None\n self.actions = None\n\n def _dynamics(self, state, u, t):\n return self.F(state, u, t) if self.step_dependency else self.F(state, u)\n\n # @handle_batch_input\n def _running_cost(self, state, u):\n return self.running_cost(state, u)\n\n def command(self, state):\n \"\"\"\n :param state: (nx) or (K x nx) current state, or samples of states (for propagating a distribution of states)\n :returns action: (nu) best action\n \"\"\"\n # shift command 1 time step\n self.U = torch.roll(self.U, -1, dims=0)\n self.U[-1] = self.u_init\n\n if not torch.is_tensor(state):\n state = torch.tensor(state)\n self.state = state.to(dtype=self.dtype, device=self.d)\n\n cost_total = self._compute_total_cost_batch()\n logger.debug(f\"cost_total: {cost_total.shape}\")\n\n beta = torch.min(cost_total)\n self.cost_total_non_zero = _ensure_non_zero(cost_total, beta, 1 / self.lambda_)\n\n eta = torch.sum(self.cost_total_non_zero)\n self.omega = (1.0 / eta) * self.cost_total_non_zero\n for t in range(self.T):\n self.U[t] += torch.sum(self.omega.view(-1, 1) * self.noise[:, t], dim=0)\n action = self.U[: self.u_per_command]\n # reduce dimensionality if we only need the first command\n if self.u_per_command == 1:\n action = action[0]\n\n logger.debug(f\"action: {action}\")\n return action * self.u_scale\n\n def reset(self):\n \"\"\"\n Clear controller state after finishing a trial\n \"\"\"\n self.U = self.noise_dist.sample((self.T,))\n\n def _compute_rollout_costs(self, perturbed_actions):\n K, T, nu = perturbed_actions.shape\n assert nu == self.nu\n\n cost_total = torch.zeros(K, device=self.d, dtype=self.dtype)\n cost_samples = cost_total.repeat(self.M, 1)\n cost_var = torch.zeros_like(cost_total)\n\n # allow propagation of a sample of states (ex. 
to carry a distribution), or to start with a single state\n if self.state.shape == (K, self.nx):\n state_mu = self.state\n else:\n state_mu = self.state.view(1, -1).repeat(K, 1)\n\n logger.debug(f\"state: {state_mu.shape}\")\n\n states_mu = []\n states_var = []\n actions = []\n perturbed_actions = self.u_scale * perturbed_actions\n for t in range(T):\n u = perturbed_actions[:, t, :]\n state_mu, state_var = self._dynamics(state_mu, u, t)\n c = self._running_cost(state_mu, u)\n cost_samples += c\n if self.M > 1:\n cost_var += c.var(dim=0) * (self.rollout_var_discount**t)\n\n # Save total states/actions\n states_mu.append(state_mu)\n states_var.append(state_var)\n actions.append(u)\n\n # Actions is K x T x nu\n # States is K x T x nx\n actions = torch.stack(actions, dim=-2)\n states_mu = torch.stack(states_mu, dim=-2)\n states_var = torch.stack(states_var, dim=-2)\n logger.debug(f\"states: {states_mu.shape}\")\n\n # action perturbation cost\n if self.terminal_state_cost:\n c = self.terminal_state_cost(states_mu, actions)\n cost_samples += c\n cost_total += cost_samples.mean(dim=0)\n cost_total += cost_var * self.rollout_var_cost\n logger.debug(f\"{cost_total.shape} | {states_mu.shape} | {actions.shape}\")\n return cost_total, states_mu, states_var, actions\n\n def _compute_total_cost_batch(self):\n # parallelize sampling across trajectories\n # resample noise each time we take an action\n self.noise = self.noise_dist.sample((self.K, self.T)) # K x T x nu\n self.perturbed_action = self.U + self.noise\n if self.sample_null_action:\n self.perturbed_action[self.K - 1] = 0\n # naively bound control\n self.perturbed_action = self._bound_action(self.perturbed_action * self.u_scale)\n self.perturbed_action /= self.u_scale\n # bounded noise after bounding (some got cut off, so we don't penalize that in action cost)\n self.noise = self.perturbed_action - self.U\n if self.noise_abs_cost:\n action_cost = self.lambda_ * torch.abs(self.noise) @ self.noise_sigma_inv\n # NOTE: The original paper does self.lambda_ * self.noise @ self.noise_sigma_inv, but this biases\n # the actions with low noise if all states have the same cost. 
With abs(noise) we prefer actions close to the\n # nominal trajectory.\n else:\n action_cost = self.lambda_ * self.noise @ self.noise_sigma_inv # Like original paper\n logger.debug(f\"action_cost: {action_cost.shape}\")\n\n self.cost_total, self.states_mu, self.states_var, self.actions = self._compute_rollout_costs(\n self.perturbed_action\n )\n self.actions /= self.u_scale\n\n # action perturbation cost\n perturbation_cost = torch.sum(self.U * action_cost, dim=(1, 2))\n self.cost_total += perturbation_cost\n return self.cost_total\n\n def _bound_action(self, action):\n if self.u_max is not None:\n action = torch.clamp(action, min=self.u_min, max=self.u_max)\n return action\n\n def get_rollouts(self, state, num_rollouts=1):\n \"\"\"\n :param state: either (nx) vector or (num_rollouts x nx) for sampled initial states\n :param num_rollouts: Number of rollouts with same action sequence - for generating samples with stochastic\n dynamics\n :returns states: num_rollouts x T x nx vector of trajectories\n\n \"\"\"\n state = state.view(-1, self.nx)\n if state.size(0) == 1:\n state = state.repeat(num_rollouts, 1)\n\n T = self.U.shape[0]\n states = torch.zeros((num_rollouts, T + 1, self.nx), dtype=self.U.dtype, device=self.U.device)\n states[:, 0] = state\n for t in range(T):\n states[:, t + 1] = self._dynamics(\n states[:, t].view(num_rollouts, -1), self.u_scale * self.U[t].view(num_rollouts, -1), t\n )\n return states[:, 1:]" }, { "identifier": "MPPIActiveObserving", "path": "planners/mppi_active_observing.py", "snippet": "class MPPIActiveObserving:\n \"\"\"\n Model Predictive Path Integral control\n This implementation batch samples the trajectories and so scales well with the number of samples K.\n\n Implemented according to algorithm 2 in Williams et al., 2017\n 'Information Theoretic MPC for Model-Based Reinforcement Learning',\n based off of https://github.com/ferreirafabio/mppi_pendulum\n \"\"\"\n\n def __init__(\n self,\n dynamics,\n running_cost,\n nx,\n noise_sigma,\n cost_var_from_state_var=None,\n num_samples=100,\n horizon=15,\n device=\"cuda:0\",\n terminal_state_cost=None,\n observing_var_threshold=1.0,\n lambda_=1.0,\n noise_mu=None,\n u_min=None,\n u_max=None,\n u_init=None,\n U_init=None,\n u_scale=1,\n u_per_command=1,\n rollout_samples=1, # Ensemble size\n rollout_var_cost=0,\n rollout_var_discount=0.95,\n dt_simulation=0.01,\n dt=0.05,\n sampling_policy=\"discrete_planning\",\n continuous_time_threshold=0.5,\n observing_cost=1.0,\n sample_null_action=False,\n observing_fixed_frequency=1,\n discrete_planning=False,\n discrete_interval=1,\n limit_actions_to_only_positive=False,\n fixed_continuous_planning_steps=None,\n debug_mode_return_full_cost_std=False,\n debug_mode_cp_return_continuous_reward_unc=False,\n noise_abs_cost=False,\n ):\n \"\"\"\n :param dynamics: function(state, action) -> next_state (K x nx) taking in batch state (K x nx) and action (K x nu)\n :param running_cost: function(state, action) -> cost (K) taking in batch state and action (same as dynamics)\n :param nx: state dimension\n :param noise_sigma: (nu x nu) control noise covariance (assume v_t ~ N(u_t, noise_sigma))\n :param num_samples: K, number of trajectories to sample\n :param horizon: T, length of each trajectory\n :param device: pytorch device\n :param terminal_state_cost: function(state) -> cost (K x 1) taking in batch state\n :param lambda_: temperature, positive scalar where larger values will allow more exploration\n :param noise_mu: (nu) control noise mean (used to bias control samples); defaults 
to zero mean\n :param u_min: (nu) minimum values for each dimension of control to pass into dynamics\n :param u_max: (nu) maximum values for each dimension of control to pass into dynamics\n :param u_init: (nu) what to initialize new end of trajectory control to be; defeaults to zero\n :param U_init: (T x nu) initial control sequence; defaults to noise\n :param rollout_samples: M, number of state trajectories to rollout for each control trajectory\n (should be 1 for deterministic dynamics and more for models that output a distribution)\n :param rollout_var_cost: Cost attached to the variance of costs across trajectory rollouts\n :param rollout_var_discount: Discount of variance cost over control horizon\n :param sample_null_action: Whether to explicitly sample a null action (bad for starting in a local minima)\n :param noise_abs_cost: Whether to use the absolute value of the action noise to avoid bias when all states have the same cost\n \"\"\"\n self.d = device\n self.dt_simulation = dt_simulation\n if discrete_planning:\n dt_plan = dt_simulation * discrete_interval\n else:\n dt_plan = dt\n self.discrete_planning = discrete_planning\n self.discrete_interval = discrete_interval\n self.limit_actions_to_only_positive = limit_actions_to_only_positive\n self.continuous_time_interval = max(int(continuous_time_threshold * discrete_interval), 1)\n self.dtype = noise_sigma.dtype\n self.K = num_samples # N_SAMPLES\n self.T = horizon # TIMESTEPS\n self.dt = dt_plan\n self.observing_cost = observing_cost # Hyperparameter to be tuned\n self.observing_var_threshold = observing_var_threshold # Hyperparameter to be tuned\n self.observing_fixed_frequency = observing_fixed_frequency\n\n # dimensions of state and control\n self.nx = nx\n self.nu = 1 if len(noise_sigma.shape) == 0 else noise_sigma.shape[0]\n self.lambda_ = lambda_\n\n if noise_mu is None:\n noise_mu = torch.zeros(self.nu, dtype=self.dtype)\n\n if u_init is None:\n u_init = torch.zeros_like(noise_mu)\n\n # handle 1D edge case\n if self.nu == 1:\n noise_mu = noise_mu.view(-1)\n noise_sigma = noise_sigma.view(-1, 1)\n\n # bounds\n self.u_min = u_min\n self.u_max = u_max\n self.u_scale = u_scale\n self.u_per_command = u_per_command\n # make sure if any of them is specified, both are specified\n if self.u_max is not None and self.u_min is None:\n if not torch.is_tensor(self.u_max):\n self.u_max = torch.tensor(self.u_max)\n self.u_min = -self.u_max\n if self.u_min is not None and self.u_max is None:\n if not torch.is_tensor(self.u_min):\n self.u_min = torch.tensor(self.u_min)\n self.u_max = -self.u_min\n if self.u_min is not None:\n self.u_min = self.u_min.to(device=self.d)\n self.u_max = self.u_max.to(device=self.d)\n\n self.noise_mu = noise_mu.to(self.d)\n self.noise_sigma = noise_sigma.to(self.d)\n self.noise_sigma_inv = torch.inverse(self.noise_sigma)\n self.noise_dist = MultivariateNormal(self.noise_mu, covariance_matrix=self.noise_sigma)\n # T x nu control sequence\n self.U = U_init\n self.u_init = u_init.to(self.d)\n\n if self.U is None:\n self.U = self.noise_dist.sample((self.T,))\n\n self.F = dynamics\n self.running_cost = running_cost\n self.terminal_state_cost = terminal_state_cost\n self.sample_null_action = sample_null_action\n self.noise_abs_cost = noise_abs_cost\n self.state = None\n\n # handling dynamics models that output a distribution (take multiple trajectory samples)\n self.M = rollout_samples\n self.rollout_var_cost = rollout_var_cost\n self.rollout_var_discount = rollout_var_discount\n\n # sampled results from last command\n 
self.cost_total = None\n self.cost_total_non_zero = None\n self.omega = None\n self.states_mu = None\n self.states_var = None\n self.actions = None\n\n self.sampling_policy = sampling_policy\n self.cost_var_from_state_var = cost_var_from_state_var\n\n self.previous_step = 0\n self.fixed_continuous_planning_steps = fixed_continuous_planning_steps\n self.debug_mode_return_full_cost_std = debug_mode_return_full_cost_std\n self.debug_mode_cp_return_continuous_reward_unc = debug_mode_cp_return_continuous_reward_unc\n\n def _dynamics(self, state, u, ts_pred, return_var=True):\n if self.limit_actions_to_only_positive:\n u[u <= 0] = 0\n return self.F(state, u, ts_pred, return_var=return_var)\n\n def _cost_var_from_state_var(self, state_var):\n if not self.cost_var_from_state_var is None:\n return self.cost_var_from_state_var(state_var)\n else:\n return state_var.sum()\n\n # @handle_batch_input\n def _running_cost(self, state, u):\n return self.running_cost(state, u)\n\n def reset(self):\n \"\"\"\n Clear controller state after finishing a trial\n \"\"\"\n self.U = self.noise_dist.sample((self.T,))\n\n def _compute_rollout_costs(self, perturbed_actions):\n K, T, nu = perturbed_actions.shape\n assert nu == self.nu\n\n cost_total = torch.zeros(K, device=self.d, dtype=self.dtype)\n cost_samples = cost_total.repeat(self.M, 1)\n cost_var = torch.zeros_like(cost_total)\n\n # allow propagation of a sample of states (ex. to carry a distribution), or to start with a single state\n if self.state.shape == (K, self.nx):\n state_mu = self.state\n else:\n state_mu = self.state.view(1, -1).repeat(K, 1)\n\n logger.debug(f\"state: {state_mu.shape}\")\n\n states_mu = []\n # states_var = []\n actions = []\n perturbed_actions = self.u_scale * perturbed_actions\n ts_pred = torch.tensor(self.dt, device=self.d, dtype=self.dtype).view(1, 1).repeat(K, 1)\n\n for t in range(T):\n u = perturbed_actions[:, t, :]\n state_mu, _ = self._dynamics(state_mu, u, ts_pred, return_var=False)\n c = self._running_cost(state_mu, u)\n cost_samples += c\n if self.M > 1:\n cost_var += c.var(dim=0) * (self.rollout_var_discount**t)\n\n # Save total states/actions\n states_mu.append(state_mu)\n actions.append(u)\n\n # Actions is K x T x nu\n # States is K x T x nx\n actions = torch.stack(actions, dim=-2)\n states_mu = torch.stack(states_mu, dim=-2)\n logger.debug(f\"states: {states_mu.shape}\")\n\n # action perturbation cost\n if self.terminal_state_cost:\n c = self.terminal_state_cost(states_mu, actions)\n cost_samples += c\n cost_total += cost_samples.mean(dim=0)\n cost_total += cost_var * self.rollout_var_cost\n logger.debug(f\"{cost_total.shape} | {states_mu.shape} | {actions.shape}\")\n return cost_total, states_mu, actions\n\n def _compute_total_cost_batch(self):\n # parallelize sampling across trajectories\n # resample noise each time we take an action\n self.noise = self.noise_dist.sample((self.K, self.T)) # K x T x nu\n self.perturbed_action = self.U + self.noise\n if self.sample_null_action:\n self.perturbed_action[self.K - 1] = 0\n # naively bound control\n self.perturbed_action = self._bound_action(self.perturbed_action * self.u_scale)\n self.perturbed_action /= self.u_scale\n # bounded noise after bounding (some got cut off, so we don't penalize that in action cost)\n self.noise = self.perturbed_action - self.U\n if self.noise_abs_cost:\n action_cost = self.lambda_ * torch.abs(self.noise) @ self.noise_sigma_inv\n # NOTE: The original paper does self.lambda_ * self.noise @ self.noise_sigma_inv, but this biases\n # the actions with 
low noise if all states have the same cost. With abs(noise) we prefer actions close to the\n # nominal trajectory.\n else:\n action_cost = self.lambda_ * self.noise @ self.noise_sigma_inv # Like original paper\n logger.debug(f\"action_cost: {action_cost.shape}\")\n\n self.cost_total, self.states_mu, self.actions = self._compute_rollout_costs(self.perturbed_action)\n self.actions /= self.u_scale\n\n # action perturbation cost\n perturbation_cost = torch.sum(self.U * action_cost, dim=(1, 2)) # wonder if can remove?\n self.cost_total += perturbation_cost\n return self.cost_total\n\n def _bound_action(self, action):\n if self.u_max is not None:\n action = torch.clamp(action, min=self.u_min, max=self.u_max)\n return action\n\n def get_rollouts(self, state, num_rollouts=1):\n \"\"\"\n :param state: either (nx) vector or (num_rollouts x nx) for sampled initial states\n :param num_rollouts: Number of rollouts with same action sequence - for generating samples with stochastic\n dynamics\n :returns states: num_rollouts x T x nx vector of trajectories\n\n \"\"\"\n state = state.view(-1, self.nx)\n if state.size(0) == 1:\n state = state.repeat(num_rollouts, 1)\n\n T = self.U.shape[0]\n states = torch.zeros((num_rollouts, T + 1, self.nx), dtype=self.U.dtype, device=self.U.device)\n states[:, 0] = state\n ts_pred = torch.tensor(self.dt, device=self.d, dtype=self.dtype).view(1, 1).repeat(num_rollouts, 1)\n for t in range(T):\n states[:, t + 1] = self._dynamics(\n states[:, t].view(num_rollouts, -1), self.u_scale * self.U[t].view(num_rollouts, -1), ts_pred\n )\n return states[:, 1:]\n\n def command(self, state):\n \"\"\"\n :param state: (nx) or (K x nx) current state, or samples of states (for propagating a distribution of states)\n :returns action: (nu) best action\n \"\"\"\n self.U = torch.zeros_like(self.U)\n\n if not torch.is_tensor(state):\n state = torch.tensor(state)\n self.state = state.to(dtype=self.dtype, device=self.d)\n assert not torch.isnan(state).any(), \"Nan detected in state\"\n\n cost_total = self._compute_total_cost_batch()\n logger.debug(f\"cost_total: {cost_total.shape}\")\n\n beta = torch.min(cost_total)\n self.cost_total_non_zero = _ensure_non_zero(cost_total, beta, 1 / self.lambda_)\n\n eta = torch.sum(self.cost_total_non_zero)\n self.omega = (1.0 / eta) * self.cost_total_non_zero\n for t in range(self.T):\n self.U[t] += torch.sum(self.omega.view(-1, 1) * self.noise[:, t], dim=0)\n\n # Calculate the state estimate of the reward here, then use that for planning etc.\n if self.debug_mode_cp_return_continuous_reward_unc and self.sampling_policy == \"continuous_planning\":\n # Monte Carlo Simulation of latest reward variance\n L = self.K * 10\n ts_pred = torch.tensor(self.dt, device=self.d, dtype=self.dtype).view(1, 1).repeat(L, 1)\n ts_pred_increment = (\n torch.arange(self.dt_simulation, self.dt, self.dt_simulation, device=self.d, dtype=self.dtype)\n .repeat_interleave(L)\n .view(-1, 1)\n )\n cost_var = torch.zeros_like(cost_total)\n if self.state.shape == (L, self.nx):\n state_mu = self.state\n else:\n state_mu = self.state.view(1, -1).repeat(L, 1)\n state_mu_in = state_mu\n costs_std = []\n costs_std.append(torch.tensor(0, device=self.d, dtype=self.dtype).view(1))\n same_actions = self.U.unsqueeze(0).repeat(L, 1, 1)\n for t in range(self.T):\n u = same_actions[:, t, :]\n # Core parts\n state_mu_pred, state_var_pred = self._dynamics(state_mu_in, u, ts_pred, return_var=True)\n state_mu_final = state_mu_pred + torch.normal(0, 1, size=state_mu_pred.shape).to(self.d) * torch.sqrt(\n 
state_var_pred\n )\n c = self._running_cost(state_mu_final, u)\n # Intermediate states\n intermediate_state_count = self.discrete_interval - 1\n state_mu_pred_increment, state_var_pred_increment = self._dynamics(\n state_mu_in.repeat(intermediate_state_count, 1),\n u.repeat(intermediate_state_count, 1),\n ts_pred_increment,\n return_var=True,\n )\n state_mu_increment = state_mu_pred_increment + torch.normal(\n 0, 1, size=state_mu_pred_increment.shape\n ).to(self.d) * torch.sqrt(state_var_pred_increment)\n c_increment = self._running_cost(state_mu_increment, u.repeat(intermediate_state_count, 1))\n inter_c_stds = c_increment.view(intermediate_state_count, -1).std(dim=1)\n costs_std.append(torch.cat((inter_c_stds, c.std().view(1))))\n state_mu_in = state_mu_final\n # States is K x T x nx\n costs_std_continuous = torch.cat(costs_std)[1:]\n stats = {\n \"costs_std_median\": costs_std_continuous.median().item(),\n \"costs_std_mean\": costs_std_continuous.mean().item(),\n \"costs_std_max\": costs_std_continuous.max().item(),\n }\n if self.debug_mode_return_full_cost_std:\n return torch.cat(costs_std).cpu()\n elif self.sampling_policy == \"active_observing_control\":\n # Monte Carlo Simulation of latest reward variance\n L = self.K * 10\n ts_pred = torch.tensor(self.dt, device=self.d, dtype=self.dtype).view(1, 1).repeat(L, 1)\n ts_pred_increment = (\n torch.arange(self.dt_simulation, self.dt, self.dt_simulation, device=self.d, dtype=self.dtype)\n .repeat_interleave(L)\n .view(-1, 1)\n )\n cost_var = torch.zeros_like(cost_total)\n if self.state.shape == (L, self.nx):\n state_mu = self.state\n else:\n state_mu = self.state.view(1, -1).repeat(L, 1)\n state_mu_in = state_mu\n costs_std = []\n costs_std.append(torch.tensor(0, device=self.d, dtype=self.dtype).view(1))\n same_actions = self.U.unsqueeze(0).repeat(L, 1, 1)\n select_actions_up_to = self.T * self.discrete_interval # Initial default value\n for t in range(self.T):\n u = same_actions[:, t, :]\n # Core parts\n state_mu_pred, state_var_pred = self._dynamics(state_mu_in, u, ts_pred, return_var=True)\n state_mu_final = state_mu_pred + torch.normal(0, 1, size=state_mu_pred.shape).to(self.d) * torch.sqrt(\n state_var_pred\n )\n c = self._running_cost(state_mu_final, u)\n if c.std() >= self.observing_var_threshold:\n t_upper = ts_pred.view(-1)[0]\n t_lower = torch.tensor(0.0).to(self.d)\n while (t_upper - t_lower) > self.dt_simulation:\n t_mid = (t_upper + t_lower) / 2.0\n state_mu_pred_increment, state_var_pred_increment = self._dynamics(\n state_mu_in, u, torch.ones_like(ts_pred) * t_mid, return_var=True\n )\n state_mu_increment = state_mu_pred_increment + torch.normal(\n 0, 1, size=state_mu_pred_increment.shape\n ).to(self.d) * torch.sqrt(state_var_pred_increment)\n c_increment = self._running_cost(state_mu_increment, u)\n if c_increment.std() >= self.observing_var_threshold:\n t_upper = t_mid\n else:\n t_lower = t_mid\n select_actions_up_to = (\n t * self.discrete_interval\n + torch.floor((t_mid / ts_pred.view(-1)[0]) * self.discrete_interval).int().item()\n )\n break\n state_mu_in = state_mu_final\n stats = {}\n else:\n # Monte Carlo Simulation of latest reward variance\n L = self.K * 10\n ts_pred = torch.tensor(self.dt, device=self.d, dtype=self.dtype).view(1, 1).repeat(L, 1)\n cost_var = torch.zeros_like(cost_total)\n if self.state.shape == (L, self.nx):\n state_mu = self.state\n else:\n state_mu = self.state.view(1, -1).repeat(L, 1)\n states_mu = []\n states_var = []\n costs = []\n same_actions = self.U.unsqueeze(0).repeat(L, 1, 1)\n for t 
in range(self.T):\n u = same_actions[:, t, :]\n state_mu, state_var = self._dynamics(state_mu, u, ts_pred, return_var=True)\n state_mu = state_mu + torch.normal(0, 1, size=state_mu.shape).to(self.d) * torch.sqrt(state_var)\n c = self._running_cost(state_mu, u)\n if self.M > 1: # Untested, however should underperform - MPPI with uncertaintity paper\n cost_var += c.var(dim=0) * (self.rollout_var_discount**t)\n\n # Save total states/actions\n costs.append(c)\n states_mu.append(state_mu)\n states_var.append(state_var)\n\n # States is K x T x nx\n states_mu = torch.stack(states_mu, dim=-2)\n states_var = torch.stack(states_var, dim=-2)\n costs = torch.stack(costs, dim=-2)\n costs_std_discrete = torch.cat(\n (torch.tensor(0, device=self.d, dtype=self.dtype).view(1), costs.std(dim=1))\n )[1:]\n stats = {\n \"costs_std_median\": costs_std_discrete.median().item(),\n \"costs_std_mean\": costs_std_discrete.mean().item(),\n \"costs_std_max\": costs_std_discrete.max().item(),\n }\n if self.debug_mode_return_full_cost_std:\n return (\n torch.cat((torch.tensor(0, device=self.d, dtype=self.dtype).view(1), costs.std(dim=1)))\n .repeat_interleave(self.discrete_interval)\n .cpu()\n )\n\n if self.sampling_policy == \"discrete_monitoring\":\n actions = self.U[costs_std_discrete < self.observing_var_threshold]\n if actions.shape[0] == 0:\n actions = self.U[: self.u_per_command]\n costs_std_discrete = costs_std_discrete[: self.u_per_command]\n else:\n costs_std_discrete = costs_std_discrete[costs_std_discrete < self.observing_var_threshold]\n elif self.sampling_policy == \"discrete_planning\" or self.sampling_policy == \"continuous_planning\":\n if self.fixed_continuous_planning_steps is None:\n if not self.debug_mode_cp_return_continuous_reward_unc:\n actions = self.U[: self.observing_fixed_frequency]\n costs_std_discrete = costs_std_discrete[: self.observing_fixed_frequency]\n else:\n actions = self.U[: self.observing_fixed_frequency]\n costs_std_continuous = costs_std_continuous[\n : self.observing_fixed_frequency * self.continuous_time_interval\n ]\n costs_std_discrete = torch.tensor(0, device=self.d, dtype=self.dtype).view(1)\n else:\n actions = self.U\n costs_std_discrete = costs_std_discrete\n elif self.sampling_policy == \"active_observing_control\":\n actions = self.U\n actions = actions.repeat_interleave(self.discrete_interval, dim=0)\n slice_to_take_holder = torch.zeros((actions.shape[0])).bool()\n slice_to_take_holder[:select_actions_up_to] = True\n actions = actions[slice_to_take_holder]\n if actions.shape[0] <= (self.continuous_time_interval - 1):\n self.previous_step = int(np.ceil(actions.shape[0] / self.discrete_interval))\n actions = self.U.repeat_interleave(self.discrete_interval, dim=0)\n actions = actions[: self.continuous_time_interval]\n else:\n self.previous_step = int(actions.shape[0] / self.discrete_interval)\n assert not torch.isnan(actions).any(), \"Nan detected in actions\"\n costs_std_continuous = torch.ones_like(actions).to(self.d)\n return actions * self.u_scale, costs_std_continuous, stats\n else:\n raise NotImplementedError(f\"sampling_policy: {self.sampling_policy} not recognized\")\n self.previous_step = actions.shape[0]\n assert not torch.isnan(actions).any(), \"Nan detected in actions\"\n if self.discrete_planning:\n actions = actions.repeat_interleave(self.discrete_interval, dim=0)\n costs_std_discrete = costs_std_discrete.repeat_interleave(self.discrete_interval, dim=0)\n if self.sampling_policy == \"continuous_planning\":\n if self.fixed_continuous_planning_steps is 
None:\n actions = actions[: self.continuous_time_interval]\n if not self.debug_mode_cp_return_continuous_reward_unc:\n costs_std_discrete = costs_std_discrete[: self.continuous_time_interval]\n else:\n costs_std_discrete = costs_std_continuous\n self.previous_step = int(np.ceil(actions.shape[0] / self.discrete_interval))\n else:\n actions = actions[: self.fixed_continuous_planning_steps]\n costs_std_discrete = costs_std_discrete[: self.fixed_continuous_planning_steps]\n self.previous_step = int(np.ceil(actions.shape[0] / self.discrete_interval))\n return actions * self.u_scale, costs_std_discrete, stats" } ]
import logging
import os
import time
import imageio
import numpy as np
import torch
import torch.multiprocessing as multiprocessing
from functools import partial
from tqdm import tqdm
from config import dotdict
from overlay import create_env, setup_logger, start_virtual_display, step_env
from planners.mppi import MPPI
from planners.mppi_active_observing import MPPIActiveObserving
from oracle import pendulum_dynamics_dt
from oracle import cartpole_dynamics_dt
from oracle import acrobot_dynamics_dt
from oracle import cancer_dynamics_dt
from pathlib import Path
from config import get_config, seed_all
11,134
nu, device=device, dtype=dtype ) * (gamma - off_diagonal) logger.info(mppi_noise_sigma) mppi_lambda_ = 1.0 random_action_noise = config.collect_expert_random_action_noise if model_name == "random": def dynamics(state, perturbed_action): pass elif model_name == "oracle": oracle_sigma = config.observation_noise if env_name == "oderl-pendulum": dynamics_oracle = pendulum_dynamics_dt elif env_name == "oderl-cartpole": dynamics_oracle = cartpole_dynamics_dt elif env_name == "oderl-acrobot": dynamics_oracle = acrobot_dynamics_dt elif env_name == "oderl-cancer": dynamics_oracle = cancer_dynamics_dt def dynamics(*args, **kwargs): state_mu = dynamics_oracle(*args, **kwargs) return state_mu, torch.ones_like(state_mu) * oracle_sigma dynamics = partial(dynamics, friction=config.friction) def running_cost(state, action): if state_constraint: reward = env.diff_obs_reward_( state, exp_reward=False, state_constraint=state_constraint ) + env.diff_ac_reward_(action) elif change_goal: global change_goal_flipped reward = env.diff_obs_reward_( state, exp_reward=False, change_goal=change_goal, change_goal_flipped=change_goal_flipped ) + env.diff_ac_reward_(action) else: reward = env.diff_obs_reward_(state, exp_reward=False) + env.diff_ac_reward_(action) cost = -reward return cost if config.planner == "mppi": mppi_gym = MPPI( dynamics, running_cost, nx, mppi_noise_sigma, num_samples=roll_outs, horizon=time_steps, device=device, lambda_=mppi_lambda_, u_min=torch.tensor(ACTION_LOW), u_max=torch.tensor(ACTION_HIGH), u_scale=ACTION_HIGH, ) elif config.planner == "mppi_active_observing": mppi_gym = MPPIActiveObserving( dynamics, running_cost, nx, mppi_noise_sigma, num_samples=roll_outs, horizon=time_steps, device=device, lambda_=mppi_lambda_, u_min=torch.tensor(ACTION_LOW), u_max=torch.tensor(ACTION_HIGH), u_scale=ACTION_HIGH, observing_cost=config.observing_cost, sampling_policy=config.sampling_policy, observing_var_threshold=config.observing_var_threshold, limit_actions_to_only_positive=limit_actions_to_only_positive, dt=dt, ) if save_video: start_virtual_display() videos_folder = "./logs/new_videos" Path(videos_folder).mkdir(parents=True, exist_ok=True) filename = f"{videos_folder}/{env_name}_{model_name}_{uniq}.mp4" fps = int(1 / dt) def loop(): s0 = [] a0 = [] sn = [] ts = [] ACTION_LOW = env.action_space.low[0] ACTION_HIGH = env.action_space.high[0] it = 0 total_reward = 0 env.reset() start_time = time.perf_counter() mppi_gym.reset() while it < iter_: if change_goal_flipped_iter_ < it: change_goal_flipped = True state = env.get_obs() s0.append(state) command_start = time.perf_counter() if model_name != "random": action, costs_std = mppi_gym.command(state) if random_action_noise is not None: action += ( (torch.rand(nu, device=device) - 0.5) * 2.0 * env.action_space.high[0] ) * random_action_noise action = action.clip(min=ACTION_LOW, max=ACTION_HIGH) action = action.view(nu) else: action = torch.from_numpy(env.action_space.sample()) elapsed = time.perf_counter() - command_start
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") logger = logging.getLogger() def inner_mppi_with_model_collect_data( seed, model_name, env_name, roll_outs=1000, time_steps=30, lambda_=1.0, sigma=1.0, dt=0.05, model_seed=11, save_video=False, state_constraint=False, change_goal=False, encode_obs_time=False, model=None, uniq=None, log_debug=False, episodes_per_sampler_task=10, config={}, iter_=200, change_goal_flipped_iter_=False, ts_grid="exp", intermediate_run=False, ): config = dotdict(config) env = create_env(env_name, dt=dt, ts_grid=ts_grid, friction=config.friction) ACTION_LOW = env.action_space.low[0] ACTION_HIGH = env.action_space.high[0] if env_name == "oderl-cancer": limit_actions_to_only_positive = True else: limit_actions_to_only_positive = False nx = env.get_obs().shape[0] nu = env.action_space.shape[0] dtype = torch.float32 gamma = sigma**2 off_diagonal = 0.5 * gamma mppi_noise_sigma = torch.ones((nu, nu), device=device, dtype=dtype) * off_diagonal + torch.eye( nu, device=device, dtype=dtype ) * (gamma - off_diagonal) logger.info(mppi_noise_sigma) mppi_lambda_ = 1.0 random_action_noise = config.collect_expert_random_action_noise if model_name == "random": def dynamics(state, perturbed_action): pass elif model_name == "oracle": oracle_sigma = config.observation_noise if env_name == "oderl-pendulum": dynamics_oracle = pendulum_dynamics_dt elif env_name == "oderl-cartpole": dynamics_oracle = cartpole_dynamics_dt elif env_name == "oderl-acrobot": dynamics_oracle = acrobot_dynamics_dt elif env_name == "oderl-cancer": dynamics_oracle = cancer_dynamics_dt def dynamics(*args, **kwargs): state_mu = dynamics_oracle(*args, **kwargs) return state_mu, torch.ones_like(state_mu) * oracle_sigma dynamics = partial(dynamics, friction=config.friction) def running_cost(state, action): if state_constraint: reward = env.diff_obs_reward_( state, exp_reward=False, state_constraint=state_constraint ) + env.diff_ac_reward_(action) elif change_goal: global change_goal_flipped reward = env.diff_obs_reward_( state, exp_reward=False, change_goal=change_goal, change_goal_flipped=change_goal_flipped ) + env.diff_ac_reward_(action) else: reward = env.diff_obs_reward_(state, exp_reward=False) + env.diff_ac_reward_(action) cost = -reward return cost if config.planner == "mppi": mppi_gym = MPPI( dynamics, running_cost, nx, mppi_noise_sigma, num_samples=roll_outs, horizon=time_steps, device=device, lambda_=mppi_lambda_, u_min=torch.tensor(ACTION_LOW), u_max=torch.tensor(ACTION_HIGH), u_scale=ACTION_HIGH, ) elif config.planner == "mppi_active_observing": mppi_gym = MPPIActiveObserving( dynamics, running_cost, nx, mppi_noise_sigma, num_samples=roll_outs, horizon=time_steps, device=device, lambda_=mppi_lambda_, u_min=torch.tensor(ACTION_LOW), u_max=torch.tensor(ACTION_HIGH), u_scale=ACTION_HIGH, observing_cost=config.observing_cost, sampling_policy=config.sampling_policy, observing_var_threshold=config.observing_var_threshold, limit_actions_to_only_positive=limit_actions_to_only_positive, dt=dt, ) if save_video: start_virtual_display() videos_folder = "./logs/new_videos" Path(videos_folder).mkdir(parents=True, exist_ok=True) filename = f"{videos_folder}/{env_name}_{model_name}_{uniq}.mp4" fps = int(1 / dt) def loop(): s0 = [] a0 = [] sn = [] ts = [] ACTION_LOW = env.action_space.low[0] ACTION_HIGH = env.action_space.high[0] it = 0 total_reward = 0 env.reset() start_time = time.perf_counter() mppi_gym.reset() while it < iter_: if change_goal_flipped_iter_ < it: change_goal_flipped = True state = 
env.get_obs() s0.append(state) command_start = time.perf_counter() if model_name != "random": action, costs_std = mppi_gym.command(state) if random_action_noise is not None: action += ( (torch.rand(nu, device=device) - 0.5) * 2.0 * env.action_space.high[0] ) * random_action_noise action = action.clip(min=ACTION_LOW, max=ACTION_HIGH) action = action.view(nu) else: action = torch.from_numpy(env.action_space.sample()) elapsed = time.perf_counter() - command_start
state, reward, done, tsn = step_env(env, action.detach().cpu().numpy(), obs_noise=config.observation_noise)
4
2023-10-24 16:19:14+00:00
16k
s1tools/s1-etad
s1etad/_jupyter_support.py
[ { "identifier": "Sentinel1Etad", "path": "s1etad/product.py", "snippet": "class Sentinel1Etad:\n \"\"\"Sentinel-1 ETAD product.\n\n Class to decode and access the elements of the Sentinel ETAD product\n which specification is governed by ETAD-DLR-PS-0014.\n\n The index operator [] (implemented with the __getitem__ method) returns\n a Sentinel1EtadSwath instance.\n\n Parameters\n ----------\n product : str or pathlib.Path\n path of the S1-ETAD product (it is a directory)\n\n Attributes\n ----------\n product : pathlib.Path\n path of the S1-ETAD product (it is a directory)\n burst_catalogue : pandas.DataFrame\n dataframe containing main information of all bursts present in\n the product\n ds : netCDF.Dataset\n (provisional) the NetCDF.Dataset in which data are stored\n \"\"\"\n\n def __init__(self, product):\n # TODO: make this read-only (property)\n self.product = pathlib.Path(product)\n # TODO: ds should not be exposed\n self.ds = self._init_measurement_dataset()\n self._annot = self._init_annotation_dataset()\n self.burst_catalogue = self._init_burst_catalogue()\n\n def _init_measurement_dataset(self):\n \"\"\"Open the nc dataset.\"\"\"\n # @TODO: retrieve form manifest\n netcdf_file = next(self.product.glob(\"measurement/*.nc\"))\n rootgrp = Dataset(netcdf_file, \"r\")\n rootgrp.set_auto_mask(False)\n return rootgrp\n\n def _init_annotation_dataset(self):\n \"\"\"Open the xml annotation dataset.\"\"\"\n list_ = [i for i in self.product.glob(\"annotation/*.xml\")]\n xml_file = str(list_[0])\n root = etree.parse(xml_file).getroot()\n return root\n\n @functools.lru_cache()\n def __getitem__(self, index):\n assert index in self.swath_list, f\"{index} is not in {self.swath_list}\"\n return Sentinel1EtadSwath(self.ds[index])\n\n def __iter__(self):\n yield from self.iter_swaths()\n\n def __repr__(self):\n return f'{self.__class__.__name__}(\"{self.product}\") # 0x{id(self):x}'\n\n def __str__(self):\n return f'{self.__class__.__name__}(\"{self.product.name}\")'\n\n @property\n def number_of_swath(self):\n \"\"\"The number of swaths in the product.\"\"\"\n return len(self.ds.groups)\n\n @property\n def swath_list(self):\n \"\"\"The list of swath identifiers (str) in the product.\"\"\"\n return list(self.ds.groups.keys())\n\n def s1_product_list(self):\n \"\"\"Return the list of S-1 products used to compose the ETAD one.\"\"\"\n df = self.burst_catalogue\n\n # this ensures that each product name is located at the correct pIndex\n product_list = [\n item[1] for item in sorted(set(zip(df[\"pIndex\"], df[\"productID\"])))\n ]\n\n return product_list\n\n @property\n def grid_spacing(self):\n \"\"\"Return the grid spacing in meters.\"\"\"\n xp_list = {\n \"x\": \".//correctionGridRangeSampling\",\n \"y\": \".//correctionGridAzimuthSampling\",\n }\n dd = {}\n for tag, xp in xp_list.items():\n dd[tag] = self._xpath_to_list(self._annot, xp, dtype=float)\n dd[\"unit\"] = \"m\"\n return dd\n\n @property\n def grid_sampling(self):\n \"\"\"Return the grid spacing in s.\"\"\"\n xp_list = {\n \"x\": \".//productInformation/gridSampling/range\",\n \"y\": \".//productInformation/gridSampling/azimuth\",\n }\n dd = {}\n for tag, xp in xp_list.items():\n dd[tag] = self._xpath_to_list(self._annot, xp, dtype=float)\n dd[\"unit\"] = \"s\"\n return dd\n\n @property\n def min_azimuth_time(self):\n \"\"\"The minimum azimuth time of all bursts in the product.\"\"\"\n return datetime.datetime.fromisoformat(self.ds.azimuthTimeMin)\n\n @property\n def max_azimuth_time(self):\n \"\"\"The maximum azimuth time of all bursts 
in the product.\"\"\"\n return datetime.datetime.fromisoformat(self.ds.azimuthTimeMax)\n\n @property\n def min_range_time(self):\n \"\"\"The minimum range time of all bursts in the product.\"\"\"\n return self.ds.rangeTimeMin\n\n @property\n def max_range_time(self):\n \"\"\"The maximum range time of all bursts in the product.\"\"\"\n return self.ds.rangeTimeMax\n\n @property\n def vg(self):\n \"\"\"Mean ground velocity [m/s].\"\"\"\n try:\n xp = (\n \"productInformation/gridGroundSampling/\"\n \"averageZeroDopplerVelocity\"\n )\n vg = float(self._annot.find(xp).taxt)\n except (AttributeError, ValueError):\n vg = self.grid_spacing[\"y\"] / self.grid_sampling[\"y\"]\n return vg\n\n def processing_setting(self):\n \"\"\"Return the corrections performed.\n\n Read the xml file to identify the corrections performed.\n If a correction is not performed the matrix is filled with zeros.\n \"\"\"\n correction_list = [\n \"troposphericDelayCorrection\",\n \"ionosphericDelayCorrection\",\n \"solidEarthTideCorrection\",\n \"bistaticAzimuthCorrection\",\n \"dopplerShiftRangeCorrection\",\n \"FMMismatchAzimuthCorrection\",\n ]\n dd = {}\n xp_root = (\n \"processingInformation/processor/setapConfigurationFile/\"\n \"processorSettings/\"\n )\n for correction in correction_list:\n xp = xp_root + correction\n ret = self._xpath_to_list(self._annot, xp)\n if ret == \"true\":\n ret = True\n else:\n ret = False\n dd[correction] = ret\n return dd\n\n def _init_burst_catalogue(self):\n \"\"\"Build the burst catalog.\n\n Using information stored in the NetCDF file create a\n pandas.DataFrame containing all the elements allowing to index\n properly a burst.\n \"\"\"\n\n def _to_tdelta64(t):\n return np.float64(t * 1e9).astype(\"timedelta64[ns]\")\n\n data = collections.defaultdict(list)\n t0 = np.datetime64(self.ds.azimuthTimeMin, \"ns\")\n for swath in self.ds.groups.values():\n for burst in swath.groups.values():\n ax = burst.variables[\"azimuth\"]\n tmin = t0 + _to_tdelta64(ax[0])\n tmax = t0 + _to_tdelta64(ax[-1])\n\n data[\"bIndex\"].append(burst.bIndex)\n data[\"pIndex\"].append(burst.pIndex)\n data[\"sIndex\"].append(burst.sIndex)\n data[\"productID\"].append(burst.productID)\n data[\"swathID\"].append(burst.swathID)\n data[\"azimuthTimeMin\"].append(tmin)\n data[\"azimuthTimeMax\"].append(tmax)\n\n df = pd.DataFrame(data=data)\n\n return df\n\n def query_burst(\n self,\n first_time=None,\n product_name=None,\n last_time=None,\n swath=None,\n geometry=None,\n ):\n \"\"\"Query the burst catalogue to retrieve the burst matching by time.\n\n Parameters\n ----------\n first_time : datetime\n is set to None then set to the first time\n last_time : datetime\n if set to None the last_time = first_time\n product_name : str\n Name of a real S1 product e.g.\n S1B_IW_SLC__1SDV_20190805T162509_20190805T162...SAFE\n swath : str or list\n list of swathID e.g. 
'IW1' or ['IW1'] or ['IW1', 'IW2']\n geometry : shapely.geometry.[Point, Polygon, ...]\n A shapely geometry for which interstion will be searched\n\n Returns\n -------\n pandas.DataFrame\n Filtered panda dataframe\n \"\"\"\n # first sort the burst by time\n df = self.burst_catalogue.sort_values(by=[\"azimuthTimeMin\"])\n if first_time is None:\n first_time = df.iloc[0].azimuthTimeMin\n if last_time is None:\n last_time = df.iloc[-1].azimuthTimeMax\n\n ix0 = (df.azimuthTimeMin >= first_time) & (\n df.azimuthTimeMax <= last_time\n )\n\n if product_name is not None:\n # build a regex based on the name to avoid issues with annotation\n # products and CRC\n product_name = Sentinel1ProductName(product_name)\n product_name.to_annotation(value=\"[AS]\")\n product_name.crc = \"\"\n filter_ = product_name.recompose(with_suffix=False)\n ix0 = ix0 & self.burst_catalogue.productID.str.contains(\n filter_, regex=True\n )\n\n if swath is not None:\n if isinstance(swath, str):\n swath = [swath]\n ix0 = ix0 & df.swathID.isin(swath)\n\n if geometry is not None:\n bix_list = self.intersects(geometry)\n ix0 = ix0 & df.bIndex.isin(bix_list)\n\n return df.loc[ix0]\n\n def _selection_to_swath_list(self, selection=None):\n if selection is None:\n selection = self.burst_catalogue\n\n if isinstance(selection, pd.DataFrame):\n burst_selection = selection\n swath_list = selection.swathID.unique()\n elif isinstance(selection, str):\n burst_selection = None\n swath_list = [selection]\n else:\n # assume it is a list of swaths already\n import collections.abc\n\n assert isinstance(selection, collections.abc.Iterable)\n assert all(isinstance(item, str) for item in selection)\n burst_selection = None\n swath_list = selection\n\n return swath_list, burst_selection\n\n def iter_swaths(self, selection=None):\n \"\"\"Iterate over swaths according to the specified selection.\n\n Parameters\n ----------\n selection : list(str) or pd.Dataframe, optional\n the list of selected swath IDs or the result of a\n Sentinel1Etad.query_burst query.\n If the selection is None (default) the iteration is performed\n on all the swaths of the product.\n \"\"\"\n swath_list, _ = self._selection_to_swath_list(selection)\n for swath_name in swath_list:\n yield self[swath_name]\n\n def iter_bursts(self, selection=None):\n \"\"\"Iterate over burst according to the specified selection.\n\n Parameters\n ----------\n selection : list(int) or pd.Dataframe, optional\n the list of selected burst indexes or the result of a\n Sentinel1Etad.query_burst query.\n If the selection is None (default) the iteration is performed\n on all the bursts of the product.\n \"\"\"\n if selection is None:\n selection = self.burst_catalogue\n elif not isinstance(selection, pd.DataFrame):\n # assume it is a list of burst indexes\n bursts = selection\n if isinstance(bursts, int):\n bursts = [selection]\n # NOTE: preserve the order\n selection = self.burst_catalogue.bIndex.isin(bursts)\n\n assert isinstance(selection, pd.DataFrame)\n\n for idx, row in selection.iterrows():\n burst = self[row.swathID][row.bIndex]\n yield burst\n\n @staticmethod\n def _xpath_to_list(\n root, xpath, dtype=None, namespace=None, parse_time_func=None\n ):\n ll = [elt.text for elt in root.findall(xpath, namespace)]\n if parse_time_func is not None:\n ll = [datetime.datetime.fromisoformat(t) for t in ll]\n ll = parse_time_func(ll) # TODO: check\n ll = np.asarray(ll, dtype=dtype)\n\n if ll.size == 1:\n return ll.item(0)\n else:\n return ll\n\n def get_statistics(self, correction, meter=False):\n 
\"\"\"Return the global statistic value of the specified correction.\n\n The returned value is the pre-computed one that is stored in the\n XML annotation file of the product.\n\n Parameters\n ----------\n correction : str or ECorrectionType\n the corrections for which the statistic value is requested\n meter : bool\n if set to True then the returned value is expressed in meters,\n otherwise it is expressed in seconds (default: False)\n\n Returns\n -------\n dict\n a dictionary containing :class:`Statistics` (min, mean and max)\n for all available components of the specified correction:\n\n :x:\n a :class:`Statistics` instance relative to the range\n component of the specified correction\n :y:\n a :class:`Statistics` instance relative to the azimuth\n component of the specified correction\n :unit:\n the units of the returned statistics (\"m\" or \"s\")\n \"\"\"\n units = \"m\" if meter else \"s\"\n\n stat_xp = \"./qualityAndStatistics\"\n target = ECorrectionType(correction)\n target_tag = _STATS_TAG_MAP[target]\n\n statistics = {\"unit\": units}\n\n # NOTE: looping on element and heuristic test on tags is necessary\n # due to inconsistent naming of range and azimuth element\n # TODO: report the inconsistency to DLR? (TBD)\n correction_elem = self._annot.find(f\"{stat_xp}/{target_tag}\")\n for elem in correction_elem:\n if \"range\" in elem.tag:\n direction = \"x\"\n elif \"azimuth\" in elem.tag:\n direction = \"y\"\n else:\n continue\n\n statistics[direction] = Statistics(\n float(elem.findtext(f'min[@unit=\"{units}\"]')),\n float(elem.findtext(f'mean[@unit=\"{units}\"]')),\n float(elem.findtext(f'max[@unit=\"{units}\"]')),\n )\n\n return statistics\n\n def get_footprint(self, selection=None, merge=False):\n \"\"\"Return the footprints of all the bursts as MultiPolygon.\n\n It calls in the back the get_footprint of the Sentinel1EtadBurst class.\n\n Parameters\n ----------\n selection : list(str) or pd.Dataframe, optional\n the list of selected swath IDs or the result of a\n Sentinel1Etad.query_burst query.\n If the selection is None (default) the iteration is performed\n on all the swaths of the product.\n merge : bool\n if set to True return a single polygon that is the union of the\n footprints of all bursts\n \"\"\"\n polys = []\n swath_list, burst_selection = self._selection_to_swath_list(selection)\n for swath in self.iter_swaths(swath_list):\n polys.extend(swath.get_footprint(burst_selection))\n\n if merge:\n polys = shapely.ops.cascaded_union(polys)\n else:\n polys = MultiPolygon(polys)\n\n return polys\n\n def intersects(self, geometry: BaseGeometry):\n \"\"\"Return the list of burst indexes intersecting the input geometry.\n\n Computes the intersection of the footprint of the swath (all bursts)\n with the input geometry.\n\n Parameters\n ----------\n geometry : shapely.geometry.[Point, Polygon, MultiPolygon, line]\n\n Returns\n -------\n list\n list of all the burst intersecting with the input shape geometry\n \"\"\"\n lists_of_burst_indexes = [\n swath.intersects(geometry) for swath in self.iter_swaths()\n ]\n # return the flattened list\n return list(itertools.chain(*lists_of_burst_indexes))\n\n def _swath_merger(\n self,\n burst_var,\n selection=None,\n set_auto_mask=False,\n meter=False,\n fill_value=0.0,\n ):\n if selection is None:\n df = self.burst_catalogue\n elif not isinstance(selection, pd.DataFrame):\n df = self.query_burst(swath=selection)\n else:\n assert isinstance(selection, pd.DataFrame)\n df = selection\n\n # NOTE: assume a specific order of swath IDs\n 
first_swath = self[df.swathID.min()]\n near_burst = first_swath[first_swath.burst_list[0]]\n last_swath = self[df.swathID.max()]\n far_burst = last_swath[last_swath.burst_list[0]]\n\n rg_first_time = near_burst.sampling_start[\"x\"]\n rg_last_time = (\n far_burst.sampling_start[\"x\"]\n + far_burst.sampling[\"x\"] * far_burst.samples\n )\n az_first_time = df.azimuthTimeMin.min()\n az_last_time = df.azimuthTimeMax.max()\n az_ref_time = self.min_azimuth_time\n az_first_time_rel = (az_first_time - az_ref_time).total_seconds()\n\n sampling = self.grid_sampling\n dx = sampling[\"x\"]\n dy = sampling[\"y\"]\n\n num_samples = (\n np.round((rg_last_time - rg_first_time) / dx).astype(int) + 1\n )\n num_lines = (\n np.round(\n (az_last_time - az_first_time).total_seconds() / dy\n ).astype(int)\n + 1\n )\n\n img = np.full((num_lines, num_samples), fill_value=fill_value)\n # TODO: add some control option\n img = np.ma.array(img, mask=True, fill_value=fill_value)\n\n for swath in self.iter_swaths(df):\n # NOTE: use the private \"Sentinel1EtadSwath._burst_merger\" method\n # to be able to work only on the specified NetCDF variable\n dd_ = swath._burst_merger(\n burst_var,\n selection=df, # noqa\n set_auto_mask=set_auto_mask,\n meter=meter,\n )\n yoffset = dd_[\"first_azimuth_time\"] - az_first_time_rel\n xoffset = dd_[\"first_slant_range_time\"] - rg_first_time\n line_ofs = np.round(yoffset / dy).astype(int)\n sample_ofs = np.round(xoffset / dx).astype(int)\n\n slice_y = slice(line_ofs, line_ofs + dd_[burst_var].shape[0])\n slice_x = slice(sample_ofs, sample_ofs + dd_[burst_var].shape[1])\n\n img[slice_y, slice_x] = dd_[burst_var]\n\n return {\n burst_var: img,\n \"first_azimuth_time\": az_first_time,\n \"first_slant_range_time\": rg_first_time,\n \"sampling\": sampling,\n }\n\n def _core_merge_correction(\n self, prm_list, selection=None, set_auto_mask=True, meter=False\n ):\n dd = {}\n for dim, field in prm_list.items():\n dd_ = self._swath_merger(\n field,\n selection=selection,\n set_auto_mask=set_auto_mask,\n meter=meter,\n )\n dd[dim] = dd_[field]\n dd[\"sampling\"] = dd_[\"sampling\"]\n dd[\"first_azimuth_time\"] = dd_[\"first_azimuth_time\"]\n dd[\"first_slant_range_time\"] = dd_[\"first_slant_range_time\"]\n\n dd[\"unit\"] = \"m\" if meter else \"s\"\n\n # To compute lat/lon/h make a new selection with all gaps filled\n swath_list, _ = self._selection_to_swath_list(selection)\n near_swath = min(swath_list)\n far_swath = max(swath_list)\n idx = self.burst_catalogue.swathID >= near_swath\n idx &= self.burst_catalogue.swathID <= far_swath\n swaths = self.burst_catalogue.swathID[idx].unique()\n\n data = dd[\"x\" if \"x\" in prm_list else \"y\"]\n lines = data.shape[0]\n duration = lines * self.grid_sampling[\"y\"]\n duration = np.float64(duration * 1e9).astype(\"timedelta64[ns]\")\n first_time = dd[\"first_azimuth_time\"]\n last_time = first_time + duration\n\n filled_selection = self.query_burst(\n first_time=first_time, last_time=last_time, swath=swaths\n )\n\n dd[\"lats\"] = self._swath_merger(\n \"lats\",\n selection=filled_selection,\n set_auto_mask=set_auto_mask,\n meter=False,\n fill_value=np.nan,\n )[\"lats\"]\n dd[\"lons\"] = self._swath_merger(\n \"lons\",\n selection=filled_selection,\n set_auto_mask=set_auto_mask,\n meter=False,\n fill_value=np.nan,\n )[\"lons\"]\n dd[\"height\"] = self._swath_merger(\n \"height\",\n selection=filled_selection,\n set_auto_mask=set_auto_mask,\n meter=False,\n fill_value=np.nan,\n )[\"height\"]\n return dd\n\n def merge_correction(\n self,\n name: 
CorrectionType = ECorrectionType.SUM,\n selection=None,\n set_auto_mask=True,\n meter=False,\n direction=None,\n ):\n \"\"\"Merge multiple swaths of the specified correction variable.\n\n Data of the selected swaths (typically overlapped) are merged\n together to form a single data matrix with a consistent (range and\n azimuth) time axis.\n\n Note\n ----\n\n The current implementation uses a very simple algorithm that\n iterates over selected swaths and bursts and stitches correction\n data together.\n\n In overlapping regions, new data simpy overwrite the old ones.\n This is an easy algorithm and perfectly correct for atmospheric\n and geodetic correction.\n\n It is, instead, sub-optimal for system corrections (bi-static,\n Doppler, FM Rate) which have different values in overlapping\n regions. In this case results are *not* correct.\n\n Parameters\n ----------\n name : str or CorrectionType\n the name of the desired correction\n selection : list or pandas.DataFrame\n list of selected bursts (by default all bursts are selected)\n set_auto_mask : bool\n requested for netCDF4 to avoid retrieving a masked array\n meter : bool\n transform the result in meters\n direction : str or None\n if set to \"x\" (for range) or \"y\" (for \"azimuth\") only extracts\n the specified correction component.\n By default (None) all available components are returned.\n\n Returns\n -------\n dict\n a dictionary containing merged data and sampling information:\n\n :<burst_var_name>:\n merged data for the selected burst_var\n :first_azimuth_time:\n the relative azimuth first time\n :first_slant_range_time:\n the relative (slant) range first time\n :sampling:\n a dictionary containing the sampling along the\n 'x' and 'y' directions and the 'unit'\n :units:\n of the correction (seconds or meters)\n :lats:\n the matrix of latitude values (in degrees) for each point\n :lons:\n the matrix of longitude values (in degrees) for each point\n :height:\n the matrix of height values (in meters) for each point\n \"\"\"\n correction_type = ECorrectionType(name) # check values\n prm_list = _CORRECTION_NAMES_MAP[correction_type.value]\n if direction is not None:\n prm_list = {direction: prm_list[direction]}\n correction = self._core_merge_correction(\n prm_list,\n selection=selection,\n set_auto_mask=set_auto_mask,\n meter=meter,\n )\n correction[\"name\"] = correction_type.value\n return correction" }, { "identifier": "Sentinel1EtadSwath", "path": "s1etad/product.py", "snippet": "class Sentinel1EtadSwath:\n \"\"\"Object representing a swath in the S1-ETAD product.\n\n This objects are returned by methods of the :class:`Sentine1Etad` class.\n It is not expected that the user instantiates this objects directly.\n \"\"\"\n\n def __init__(self, nc_group):\n self._grp = nc_group\n\n @functools.lru_cache()\n def __getitem__(self, burst_index):\n burst_name = f\"Burst{burst_index:04d}\"\n return Sentinel1EtadBurst(self._grp[burst_name])\n\n def __iter__(self):\n yield from self.iter_bursts()\n\n def __repr__(self):\n return f'{self.__class__.__name__}(\"{self._grp.path}\") 0x{id(self):x}'\n\n @property\n def burst_list(self):\n \"\"\"The list of burst identifiers (str) of all bursts in the swath.\"\"\"\n return [burst.bIndex for burst in self._grp.groups.values()]\n\n @property\n def number_of_burst(self):\n \"\"\"The number of bursts in the swath.\"\"\"\n return len(self._grp.groups)\n\n @property\n def swath_id(self):\n \"\"\"The swath identifier (str).\"\"\"\n return self._grp.swathID\n\n @property\n def swath_index(self):\n 
\"\"\"The swath index (int).\"\"\"\n return self._grp.sIndex\n\n @property\n def sampling_start(self):\n \"\"\"Relative sampling start times.\"\"\"\n first_burst_index = self.burst_list[0]\n first_burst = self[first_burst_index]\n return first_burst.sampling_start\n\n @property\n def sampling(self):\n \"\"\"Sampling in seconds used for all bursts of the swath.\n\n A dictionary containing the following keys:\n\n * \"x\": range spacing,\n * \"y\": azimuth spacing,\n * \"units\": the measurement units used for \"x' and \"y\"\n \"\"\"\n first_burst_index = self.burst_list[0]\n first_burst = self[first_burst_index]\n return first_burst.sampling\n\n def _selection_to_burst_index_list(self, selection=None):\n if selection is None:\n index_list = self.burst_list\n elif isinstance(selection, pd.DataFrame):\n idx = selection.swathID == self.swath_id\n index_list = selection.bIndex[idx].values\n else:\n index_list = selection\n return index_list\n\n def iter_bursts(self, selection=None):\n \"\"\"Iterate over bursts according to the specified selection.\n\n Parameters\n ----------\n selection : list(int) or pd.Dataframe, optional\n the list of selected bursts or result of a\n Sentinel1Etad.query_burst query.\n If the selection is None (default) the iteration is performed\n on all the burst of the swath.\n \"\"\"\n index_list = self._selection_to_burst_index_list(selection)\n for burst_index in index_list:\n yield self[burst_index]\n\n def get_footprint(self, selection=None, merge=False):\n \"\"\"Return the footprints of all the bursts as MultiPolygon.\n\n It calls in the back the get_footprint of the Sentinel1EtadBurst class.\n\n Parameters\n ----------\n selection : list(int) or pd.Dataframe, optional\n the list of selected bursts or result of a\n Sentinel1Etad.query_burst query.\n If the selection is None (default) the iteration is performed\n on all the burst of the swath.\n merge : bool\n if set to True return a single polygon that is the union of the\n footprints of all bursts\n \"\"\"\n polys = [\n burst.get_footprint() for burst in self.iter_bursts(selection)\n ]\n if merge:\n polys = shapely.ops.cascaded_union(polys)\n else:\n polys = MultiPolygon(polys)\n\n return polys\n\n def intersects(self, geometry: BaseGeometry):\n \"\"\"Return the list of burst indexes intersecting the input geometry.\n\n Computes the intersection of the footprint of the swath (all bursts)\n with the input Geometry\n\n Parameters\n ----------\n geometry : shapely.geometry.[Point, Polygon, MultiPolygon, line]\n\n Returns\n -------\n list\n list of the indexes of all bursts intersecting with the input\n geometry\n \"\"\"\n assert isinstance(\n geometry, BaseGeometry\n ), \"The input shape is not a shapely BaseGeometry object\"\n burst_index_list = []\n swath_footprint = self.get_footprint(merge=True)\n if swath_footprint.intersects(geometry):\n burst_index_list = [\n b.burst_index\n for b in self.iter_bursts()\n if b.intersects(geometry)\n ]\n return burst_index_list\n\n def _burst_merger(\n self,\n burst_var,\n selection=None,\n az_time_min=None,\n az_time_max=None,\n set_auto_mask=False,\n meter=False,\n fill_value=0.0,\n ):\n \"\"\"Low level method to de-burst a NetCDF variable.\n\n The de-burst strategy is simple as the latest line is on top of the\n oldest.\n\n Parameters\n ----------\n burst_var : str\n one of the burst netcdf variables\n selection : list or pandas.DataFrame\n list of selected bursts (by default all bursts are selected)\n az_time_min : float\n minimum azimuth time of the merged swath\n (relative to 
the reference annotated in the NetCDF root)\n az_time_max : float\n maximum azimuth tim eof the merged swath\n (relative to the reference annotated in the NetCDF root)\n set_auto_mask : bool\n requested for netCDF4 to avoid retrieving a masked array\n meter : bool\n transform the result in meters\n\n Returns\n -------\n dict\n a dictionary containing merged data and sampling information:\n\n :<burst_var_name>: merged data for the selected burst_var\n :first_azimuth_time: the relative azimuth first time\n :first_slant_range_time: the relative (slant) range first time\n :sampling: a dictionary containing the sampling along the\n 'x' and 'y' directions and the 'unit'\n \"\"\"\n burst_index_list = self._selection_to_burst_index_list(selection)\n\n # Find what is the extent of the acquisition in azimuth\n first_burst = self[burst_index_list[0]]\n last_burst = self[burst_index_list[-1]]\n\n if az_time_min is None:\n t0 = first_burst.sampling_start[\"y\"]\n else:\n t0 = az_time_min\n\n last_azimuth, _ = last_burst.get_burst_grid()\n if az_time_max is None:\n t1 = last_azimuth[-1]\n else:\n t1 = az_time_max\n\n tau0 = min(\n burst.sampling_start[\"x\"]\n for burst in self.iter_bursts(burst_index_list)\n )\n\n # grid sampling\n dt = first_burst.sampling[\"y\"]\n dtau = first_burst.sampling[\"x\"]\n\n num_lines = np.round((t1 - t0) / dt).astype(int) + 1\n num_samples = max(\n burst.samples for burst in self.iter_bursts(burst_index_list)\n )\n\n debursted_var = np.full(\n (num_lines, num_samples), fill_value=fill_value\n )\n # TODO: add some control option\n debursted_var = np.ma.array(\n debursted_var, mask=True, fill_value=fill_value\n )\n\n for burst_ in self.iter_bursts(burst_index_list):\n assert (\n dt == burst_.sampling[\"y\"]\n ), \"The azimuth sampling is changing long azimuth\"\n assert (\n first_burst.sampling_start[\"x\"] == burst_.sampling_start[\"x\"]\n ), \"The 2-way range gridStartRangeTime is changing long azimuth\"\n\n # get the timing of the burst and convert into line index\n az_time_, rg_time_ = burst_.get_burst_grid()\n line_index_ = np.round((az_time_ - t0) / dt).astype(int)\n p0 = np.round((rg_time_[0] - tau0) / dtau).astype(int)\n\n # NOTE: use the private \"Sentinel1EtadBurst._get_etad_param\" method\n # to be able to work only on the specified NetCDF variable\n var_ = burst_._get_etad_param(\n burst_var, set_auto_mask=set_auto_mask, meter=meter # noqa\n )\n\n _, burst_samples = var_.shape\n debursted_var[line_index_, p0 : p0 + burst_samples] = var_\n\n return {\n burst_var: debursted_var,\n \"first_azimuth_time\": t0,\n \"first_slant_range_time\": first_burst.sampling_start[\"x\"],\n \"sampling\": first_burst.sampling,\n }\n\n def _core_merge_correction(\n self, prm_list, selection=None, set_auto_mask=True, meter=False\n ):\n dd = {}\n for dim, field in prm_list.items():\n dd_ = self._burst_merger(\n field,\n selection=selection,\n set_auto_mask=set_auto_mask,\n meter=meter,\n )\n dd[dim] = dd_[field]\n dd[\"sampling\"] = dd_[\"sampling\"]\n dd[\"first_azimuth_time\"] = dd_[\"first_azimuth_time\"]\n dd[\"first_slant_range_time\"] = dd_[\"first_slant_range_time\"]\n\n dd[\"unit\"] = \"m\" if meter else \"s\"\n dd[\"lats\"] = self._burst_merger(\n \"lats\", set_auto_mask=set_auto_mask, meter=False\n )[\"lats\"]\n dd[\"lons\"] = self._burst_merger(\n \"lons\", set_auto_mask=set_auto_mask, meter=False\n )[\"lons\"]\n dd[\"height\"] = self._burst_merger(\n \"height\", set_auto_mask=set_auto_mask, meter=False\n )[\"height\"]\n return dd\n\n def merge_correction(\n self,\n name: 
CorrectionType = ECorrectionType.SUM,\n selection=None,\n set_auto_mask=True,\n meter=False,\n direction=None,\n ):\n \"\"\"Merge multiple bursts of the specified correction variable.\n\n Data of the selected bursts (typically overlapped) are merged\n together to form a single data matrix with a consistent (azimuth)\n time axis.\n\n Note\n ----\n\n The current implementation uses a very simple algorithm that\n iterates over selected bursts and stitches correction data\n together.\n\n In overlapping regions, new data simpy overwrite the old ones.\n This is an easy algorithm and perfectly correct for atmospheric\n and geodetic correction.\n\n It is, instead, sub-optimal for system corrections (bi-static,\n Doppler, FM Rate) which have different values in overlapping\n regions. In this case results are *not* correct.\n\n Parameters\n ----------\n name : str or CorrectionType\n the name of the desired correction\n selection : list or pandas.DataFrame\n list of selected bursts (by default all bursts are selected)\n set_auto_mask : bool\n requested for netCDF4 to avoid retrieving a masked array\n meter : bool\n transform the result in meters\n direction : str or None\n if set to \"x\" (for range) or \"y\" (for \"azimuth\") only extracts\n the specified correction component.\n By default (None) all available components are returned.\n\n Returns\n -------\n dict\n a dictionary containing merged data and sampling information:\n\n :<burst_var_name>:\n merged data for the selected burst_var\n :first_azimuth_time:\n the relative azimuth first time\n :first_slant_range_time:\n the relative (slant) range first time\n :sampling:\n a dictionary containing the sampling along the\n 'x' and 'y' directions and the 'unit'\n :units:\n of the correction (seconds or meters)\n :lats:\n the matrix of latitude values (in degrees) for each point\n :lons:\n the matrix of longitude values (in degrees) for each point\n :height:\n the matrix of height values (in meters) for each point\n \"\"\"\n correction_type = ECorrectionType(name) # check values\n prm_list = _CORRECTION_NAMES_MAP[correction_type.value]\n if direction is not None:\n prm_list = {direction: prm_list[direction]}\n correction = self._core_merge_correction(\n prm_list,\n selection=selection,\n set_auto_mask=set_auto_mask,\n meter=meter,\n )\n correction[\"name\"] = correction_type.value\n return correction" }, { "identifier": "Sentinel1EtadBurst", "path": "s1etad/product.py", "snippet": "class Sentinel1EtadBurst:\n \"\"\"Object representing a burst in the S1-ETAD product.\n\n This objects are returned by methods of the :class:`Sentinel1EtadSwath`\n class.\n It is not expected that the user instantiates this objects directly.\n \"\"\"\n\n def __init__(self, nc_group):\n self._grp = nc_group\n self._geocoder = None\n\n def __repr__(self):\n return f'{self.__class__.__name__}(\"{self._grp.path}\") 0x{id(self):x}'\n\n @property\n def product_id(self):\n \"\"\"The S1 product (str) to which the burst object is associated.\"\"\"\n return self._grp.productID\n\n @property\n def swath_id(self):\n \"\"\"The swath identifier (str) to which the burst belongs.\"\"\"\n return self._grp.swathID\n\n @property\n def burst_id(self):\n \"\"\"The burst identifier (str).\"\"\"\n return self._grp.name\n\n @property\n def product_index(self):\n \"\"\"Index (int) of the S1 product to which the burst is associated.\"\"\"\n return self._grp.pIndex\n\n @property\n def swath_index(self):\n \"\"\"The index (int) of the swath to which the burst belongs.\"\"\"\n return 
self._grp.sIndex\n\n @property\n def burst_index(self):\n \"\"\"The index (int) of the burst.\"\"\"\n return self._grp.bIndex\n\n @functools.lru_cache()\n def get_footprint(self):\n \"\"\"Return the footprint of ghe bursts as shapely.Polygon.\n\n It gets the lat/lon/height grid and extract the 4 corners.\n \"\"\"\n lats, lons, heights = self.get_lat_lon_height()\n corner_list = [(0, 0), (0, -1), (-1, -1), (-1, 0)]\n etaf_burst_footprint = []\n for corner in corner_list:\n lat_, lon_, h_ = lats[corner], lons[corner], heights[corner]\n etaf_burst_footprint.append((lon_, lat_, h_))\n etaf_burst_footprint = Polygon(etaf_burst_footprint)\n return etaf_burst_footprint\n\n def intersects(self, geometry: BaseGeometry):\n \"\"\"Intersects the footprint of the burst with the provided shape\n\n Parameters\n ----------\n geometry : shapely.geometry.[Point, Polygon, MultiPolygon, line]\n\n Returns\n -------\n bool\n True if intersects, False otherwise\n \"\"\"\n assert isinstance(\n geometry, BaseGeometry\n ), \"Not a shapely BaseGeometry object\"\n return self.get_footprint().intersects(geometry)\n\n def get_burst_grid(self):\n \"\"\"Return the t, tau grid of the burst.\"\"\"\n azimuth = self._get_etad_param(\"azimuth\", set_auto_mask=True)\n range_ = self._get_etad_param(\"range\", set_auto_mask=True)\n return azimuth, range_\n\n @property\n def sampling_start(self):\n \"\"\"Relative sampling start times.\n\n Value in seconds relative to the beginning of the product.\n \"\"\"\n # TODO: put a reference in the docstring to the proper\n # Sentinel1Etad property.\n return dict(\n x=self._grp.gridStartRangeTime,\n y=self._grp.gridStartAzimuthTime,\n units=\"s\",\n )\n\n @property\n def sampling(self):\n \"\"\"Sampling in seconds used for all bursts of the swath.\n\n A dictionary containing the following keys:\n\n * \"x\": range spacing,\n * \"y\": azimuth spacing,\n * \"units\": the measurement units used for \"x' and \"y\"\n \"\"\"\n return dict(\n x=self._grp.gridSamplingRange,\n y=self._grp.gridSamplingAzimuth,\n units=\"s\",\n )\n\n @property\n def lines(self):\n \"\"\"The number of lines in the burst.\"\"\"\n return self._grp.dimensions[\"azimuthExtent\"].size\n\n @property\n def samples(self):\n \"\"\"The number of samples in the burst.\"\"\"\n return self._grp.dimensions[\"rangeExtent\"].size\n\n @property\n def vg(self) -> float:\n \"\"\"Average zero-Doppler ground velocity [m/s].\"\"\"\n return self._grp.averageZeroDopplerVelocity\n\n @property\n def reference_polarization(self) -> str:\n \"\"\"Reverence polarization (string).\"\"\"\n return self._grp.referencePolarization\n\n def get_polarimetric_channel_offset(self, channel: str) -> dict:\n \"\"\"Polarimetric channel delay.\n\n Return the electronic delay of the specified polarimetric channel\n w.r.t. 
the reference one (see\n :data:`Sentinel1EtadBurst.reference_polarization`).\n\n channel : str\n the string ID of the requested polarimetric channel:\n * 'VV' or 'VH' for DV products\n * 'HH' or 'HV' for DH products\n \"\"\"\n if channel not in {\"HH\", \"HV\", \"VV\", \"VH\"}:\n raise ValueError(f\"invalid channel ID: {channel!r}\")\n\n if channel[0] != self._grp.referencePolarization[0]:\n raise ValueError(\n f\"polarimetric channel not available: {channel!r}\"\n )\n\n data = dict(units=\"s\")\n\n if channel == \"HH\":\n data[\"x\"] = (self._grp.rangeOffsetHH,)\n data[\"y\"] = (self._grp.rangeOffsetHH,)\n elif channel == \"HV\":\n data[\"x\"] = (self._grp.rangeOffsetHV,)\n data[\"y\"] = (self._grp.rangeOffsetHV,)\n elif channel == \"VH\":\n data[\"x\"] = (self._grp.rangeOffsetVH,)\n data[\"y\"] = (self._grp.rangeOffsetVH,)\n elif channel == \"VV\":\n data[\"x\"] = (self._grp.rangeOffsetVV,)\n data[\"y\"] = (self._grp.rangeOffsetVV,)\n\n return data\n\n def get_timing_calibration_constants(self) -> dict:\n try:\n return dict(\n x=self._grp.instrumentTimingCalibrationRange,\n y=self._grp.instrumentTimingCalibrationAzimuth,\n units=\"s\",\n )\n except AttributeError:\n # @COMPATIBILITY: with SETAP , v1.6\n warnings.warn(\n \"instrument timing calibration constants are not available \"\n \"in the NetCDF data component this product. \"\n \"Calibration constants have been added to the NetCDF \"\n \"component in SETAP v1.6 (ETAD-DLR-PS-0014 - \"\n '\"ETAD Product Format Specification\" Issue 1.5).'\n )\n return dict(x=0, y=0, units=\"s\")\n\n def _get_etad_param(\n self, name, set_auto_mask=False, transpose=False, meter=False\n ):\n assert (\n name in self._grp.variables\n ), f\"Parameter {name!r} is not allowed\"\n\n self._grp.set_auto_mask(set_auto_mask)\n\n # TODO: avoid double copies\n # TODO: decimation factor\n field = np.asarray(self._grp[name])\n if transpose:\n field = np.transpose(field)\n\n if meter:\n if name.endswith(\"Az\"):\n k = self._grp.averageZeroDopplerVelocity\n elif name.endswith(\"Rg\"):\n k = constants.c / 2\n else:\n # it is not a correction (azimuth, range, lats, lons, height)\n k = 1\n warnings.warn(\n f\"the {name} is not a correction: \"\n 'the \"meter\" parameter will be ignored'\n )\n field *= k\n\n return field\n\n def get_lat_lon_height(self, transpose=False):\n \"\"\"Return the latitude, longitude and height for each point.\n\n Data are returned as (3) matrices (lines x samples).\n Latitude and longitude are expressed in degrees, height is\n expressed in meters.\n \"\"\"\n lats = self._get_etad_param(\n \"lats\", transpose=transpose, meter=False, set_auto_mask=True\n )\n lons = self._get_etad_param(\n \"lons\", transpose=transpose, meter=False, set_auto_mask=True\n )\n h = self._get_etad_param(\n \"height\", transpose=transpose, meter=False, set_auto_mask=True\n )\n return lats, lons, h\n\n def _core_get_correction(\n self, prm_list, set_auto_mask=False, transpose=False, meter=False\n ):\n correction = {}\n for dim, field in prm_list.items():\n correction[dim] = self._get_etad_param(\n field,\n set_auto_mask=set_auto_mask,\n transpose=transpose,\n meter=meter,\n )\n\n correction[\"unit\"] = \"m\" if meter else \"s\"\n\n return correction\n\n def get_correction(\n self,\n name: CorrectionType = ECorrectionType.SUM,\n set_auto_mask=False,\n transpose=False,\n meter=False,\n direction=None,\n ):\n \"\"\"Retrieve the correction for the specified correction \"name\".\n\n Puts the results in a dict.\n\n Parameters\n ----------\n name : ECorrectionType or str\n the 
desired correction\n set_auto_mask : bool\n requested for netCDF4 to avoid retrieving a masked array\n transpose : bool\n requested to retrieve the correction in array following the\n numpy convention for dimensions (default: False)\n meter : bool\n transform the result in meters\n direction : str or None\n if set to \"x\" (for range) or \"y\" (for \"azimuth\") only extracts\n the specified correction component.\n By default (None) all available components are returned.\n\n Returns\n -------\n dict\n a dictionary containing the following items for the\n requested correction:\n\n :x: correction in range (if applicable)\n :y: correction in azimuth (if applicable)\n :unit: 'm' or 's'\n :name: name of the correction\n \"\"\"\n correction_type = ECorrectionType(name) # check values\n name = correction_type.value\n prm_list = _CORRECTION_NAMES_MAP[name]\n if direction is not None:\n prm_list = {direction: prm_list[direction]}\n correction = self._core_get_correction(\n prm_list,\n set_auto_mask=set_auto_mask,\n transpose=transpose,\n meter=meter,\n )\n correction[\"name\"] = name\n return correction\n\n def _get_geocoder(self):\n if self._geocoder is None:\n from .geometry import GridGeocoding\n\n azimuth, range_ = self.get_burst_grid()\n lats, lons, heights = self.get_lat_lon_height()\n self._geocoder = GridGeocoding(\n lats, lons, heights, xaxis=range_, yaxis=azimuth\n )\n return self._geocoder\n\n def radar_to_geodetic(self, tau, t, deg=True):\n \"\"\"Convert RADAR coordinates into geodetic coordinates.\n\n Compute the geodetic coordinates (lat, lon, h) corresponding to\n RADAR coordinates (tau, t), i.e. fast time (range time) and slow\n time (azimuth time expressed in seconds form the reference\n :data:`Sentinel1Etad.min_azimuth_time`)::\n\n (tau, t) -> (lat, lon, h)\n\n If ``deg`` is True the output ``lat`` and ``lon`` are expressed\n in degrees, otherwise in radians.\n\n The implementation is approximated and exploits pre-computed grids\n of latitude, longitude and height values.\n\n The method is not as accurate as solving the range-Doppler equations.\n\n .. seealso:: :class:`s1etad.geometry.GridGeocoding`.\n \"\"\"\n return self._get_geocoder().forward_geocode(tau, t, deg=deg)\n\n def geodetic_to_radar(self, lat, lon, h=0, deg=True):\n \"\"\"Convert geodetic coordinates into RADAR coordinates.\n\n Compute the RADAR coordinates (tau, t), i.e. fast time (range time)\n and slow time (azimuth time expressed in seconds form the reference\n :data:`Sentinel1Etad.min_azimuth_time`) corresponding to\n geodetic coordinates (lat, lon, h)::\n\n (lat, lon, h) -> (tau, t)\n\n If ``deg`` is True it is assumed that input ``lat`` and ``lon``\n are expressed in degrees, otherwise it is assumed that angles\n are expressed in radians.\n\n The implementation is approximated and exploits pre-computed grids\n of latitude, longitude and height values.\n\n The method is not as accurate as solving the range-Doppler equations.\n\n .. seealso:: :class:`s1etad.geometry.GridGeocoding`.\n \"\"\"\n return self._get_geocoder().backward_geocode(lat, lon, h, deg=deg)\n\n def radar_to_image(self, t, tau):\n \"\"\"Convert RADAR coordinates into image coordinates.\n\n Compute the image coordinates (line, sample) corresponding\n to RADAR coordinates (tau, t), i.e. 
fast time (range time) and\n slow time (azimuth time expressed in seconds form the reference\n :data:`Sentinel1Etad.min_azimuth_time`)::\n\n (tau, t) -> (line, sample)\n \"\"\"\n line = (t - self.sampling_start[\"y\"]) / self.sampling[\"y\"]\n sample = (tau - self.sampling_start[\"x\"]) / self.sampling[\"x\"]\n return line, sample\n\n def image_to_radar(self, line, sample):\n \"\"\"Convert image coordinates into RADAR coordinates.\n\n Compute the RADAR coordinates (tau, t), i.e. fast time (range time)\n and slow time (azimuth time expressed in seconds form the reference\n :data:`Sentinel1Etad.min_azimuth_time`) corresponding to\n image coordinates (line, sample)::\n\n (line, sample) -> (t, tau)\n \"\"\"\n t = self.sampling_start[\"y\"] + line * self.sampling[\"y\"]\n tau = self.sampling_start[\"x\"] + sample * self.sampling[\"x\"]\n return t, tau" } ]
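The Sentinel1EtadBurst snippet above ends with the purely linear radar-to-image coordinate conversions (`radar_to_image` / `image_to_radar`). A minimal standalone sketch of that mapping, with hypothetical sampling values standing in for the NetCDF `gridStart*`/`gridSampling*` attributes, could look like this:

```python
# Sketch of the linear radar <-> image mapping used by
# Sentinel1EtadBurst.radar_to_image / image_to_radar.
# The sampling values are illustrative placeholders, not real product values.
sampling_start = {"x": 5.5e-3, "y": 0.0, "units": "s"}   # assumed range/azimuth start times
sampling = {"x": 2.0e-8, "y": 1.0e-3, "units": "s"}      # assumed range/azimuth spacing

def radar_to_image(t, tau):
    """(azimuth time t, range time tau) -> (line, sample)."""
    line = (t - sampling_start["y"]) / sampling["y"]
    sample = (tau - sampling_start["x"]) / sampling["x"]
    return line, sample

def image_to_radar(line, sample):
    """(line, sample) -> (azimuth time t, range time tau)."""
    t = sampling_start["y"] + line * sampling["y"]
    tau = sampling_start["x"] + sample * sampling["x"]
    return t, tau

# Round-tripping a pixel recovers the original coordinates
# (up to floating-point noise): roughly (100.0, 250.0).
t, tau = image_to_radar(100, 250)
print(radar_to_image(t, tau))
```

Because both grids are regular, the conversion is a plain affine rescaling; the geodetic conversions in the same class instead go through the pre-computed lat/lon/height grids via GridGeocoding.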
from .product import Sentinel1Etad, Sentinel1EtadSwath, Sentinel1EtadBurst
13474
# -*- coding: utf-8 -*- def _sentinel1_etad_repr_pretty_(obj, p, cycle): if cycle: p.text(repr(obj)) else: p.text(repr(obj)) p.break_() plist = obj.s1_product_list() if isinstance(plist, str): plist = [plist] p.text(f"Number of Sentinel-1 slices: {len(plist)}") p.break_() with p.group(2, "Sentinel-1 products list:"): for name in plist: p.break_() p.text(name) p.break_() p.text(f"Number of swaths: {obj.number_of_swath}") p.break_() p.text("Swath list: {}".format(", ".join(obj.swath_list))) p.break_() with p.group(2, "Azimuth time:"): p.break_() p.text(f"min: {obj.min_azimuth_time}") p.break_() p.text(f"max: {obj.max_azimuth_time}") p.break_() with p.group(2, "Range time:"): p.break_() p.text(f"min: {obj.min_range_time}") p.break_() p.text(f"max: {obj.max_range_time}") p.break_() with p.group(2, "Grid sampling:"): for key, value in obj.grid_sampling.items(): p.break_() p.text(f"{key}: {value}") p.break_() with p.group(2, "Grid spacing:"): for key, value in obj.grid_spacing.items(): p.break_() p.text(f"{key}: {value}") p.break_() with p.group(2, "Processing settings:"): for key, value in obj.processing_setting().items(): p.break_() p.text(f"{key}: {value}") def _sentinel1_etad_swath_repr_pretty_(obj, p, cycle): if cycle: p.text(repr(obj)) else: p.text(repr(obj)) p.break_() p.text(f"Swaths ID: {obj.swath_id}") p.break_() p.text(f"Number of bursts: {obj.number_of_burst}") p.break_() p.text("Burst list: " + str(obj.burst_list)) p.break_() with p.group(2, "Sampling start:"): for key, value in obj.sampling_start.items(): p.break_() p.text(f"{key}: {value}") p.break_() with p.group(2, "Sampling:"): for key, value in obj.sampling.items(): p.break_() p.text(f"{key}: {value}") def _sentinel1_etad_burst_repr_pretty_(obj, p, cycle): if cycle: p.text(repr(obj)) else: p.text(repr(obj)) p.break_() p.text(f"Swaths ID: {obj.swath_id}") p.break_() p.text(f"Burst index: {obj.burst_index}") p.break_() p.text(f"Shape: ({obj.lines}, {obj.samples})") p.break_() with p.group(2, "Sampling start:"): for key, value in obj.sampling_start.items(): p.break_() p.text(f"{key}: {value}") p.break_() with p.group(2, "Sampling:"): for key, value in obj.sampling.items(): p.break_() p.text(f"{key}: {value}") def _register_jupyter_formatters(): try: ipy = get_ipython() # noqa except NameError: return False else: formatter = ipy.display_formatter.formatters["text/plain"] formatter.for_type(Sentinel1Etad, _sentinel1_etad_repr_pretty_) formatter.for_type( Sentinel1EtadSwath, _sentinel1_etad_swath_repr_pretty_ ) formatter.for_type(
# -*- coding: utf-8 -*- def _sentinel1_etad_repr_pretty_(obj, p, cycle): if cycle: p.text(repr(obj)) else: p.text(repr(obj)) p.break_() plist = obj.s1_product_list() if isinstance(plist, str): plist = [plist] p.text(f"Number of Sentinel-1 slices: {len(plist)}") p.break_() with p.group(2, "Sentinel-1 products list:"): for name in plist: p.break_() p.text(name) p.break_() p.text(f"Number of swaths: {obj.number_of_swath}") p.break_() p.text("Swath list: {}".format(", ".join(obj.swath_list))) p.break_() with p.group(2, "Azimuth time:"): p.break_() p.text(f"min: {obj.min_azimuth_time}") p.break_() p.text(f"max: {obj.max_azimuth_time}") p.break_() with p.group(2, "Range time:"): p.break_() p.text(f"min: {obj.min_range_time}") p.break_() p.text(f"max: {obj.max_range_time}") p.break_() with p.group(2, "Grid sampling:"): for key, value in obj.grid_sampling.items(): p.break_() p.text(f"{key}: {value}") p.break_() with p.group(2, "Grid spacing:"): for key, value in obj.grid_spacing.items(): p.break_() p.text(f"{key}: {value}") p.break_() with p.group(2, "Processing settings:"): for key, value in obj.processing_setting().items(): p.break_() p.text(f"{key}: {value}") def _sentinel1_etad_swath_repr_pretty_(obj, p, cycle): if cycle: p.text(repr(obj)) else: p.text(repr(obj)) p.break_() p.text(f"Swaths ID: {obj.swath_id}") p.break_() p.text(f"Number of bursts: {obj.number_of_burst}") p.break_() p.text("Burst list: " + str(obj.burst_list)) p.break_() with p.group(2, "Sampling start:"): for key, value in obj.sampling_start.items(): p.break_() p.text(f"{key}: {value}") p.break_() with p.group(2, "Sampling:"): for key, value in obj.sampling.items(): p.break_() p.text(f"{key}: {value}") def _sentinel1_etad_burst_repr_pretty_(obj, p, cycle): if cycle: p.text(repr(obj)) else: p.text(repr(obj)) p.break_() p.text(f"Swaths ID: {obj.swath_id}") p.break_() p.text(f"Burst index: {obj.burst_index}") p.break_() p.text(f"Shape: ({obj.lines}, {obj.samples})") p.break_() with p.group(2, "Sampling start:"): for key, value in obj.sampling_start.items(): p.break_() p.text(f"{key}: {value}") p.break_() with p.group(2, "Sampling:"): for key, value in obj.sampling.items(): p.break_() p.text(f"{key}: {value}") def _register_jupyter_formatters(): try: ipy = get_ipython() # noqa except NameError: return False else: formatter = ipy.display_formatter.formatters["text/plain"] formatter.for_type(Sentinel1Etad, _sentinel1_etad_repr_pretty_) formatter.for_type( Sentinel1EtadSwath, _sentinel1_etad_swath_repr_pretty_ ) formatter.for_type(
Sentinel1EtadBurst, _sentinel1_etad_burst_repr_pretty_
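The `cropped_code`/`all_code` fields above register custom plain-text formatters with IPython, and the gold `next_line` completes the final `formatter.for_type(` call. A small self-contained sketch of the same registration pattern, using a made-up `Point` class instead of the s1etad types, might look like this:

```python
# Illustrative sketch of the IPython plain-text formatter pattern used above;
# the Point class and its pretty-printer are hypothetical.
class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

def _point_repr_pretty_(obj, p, cycle):
    if cycle:
        p.text(repr(obj))
    else:
        p.text(f"Point(x={obj.x}, y={obj.y})")

def register_formatters():
    try:
        ipy = get_ipython()  # noqa: F821 - only defined inside IPython
    except NameError:
        return False  # plain interpreter: nothing to register
    formatter = ipy.display_formatter.formatters["text/plain"]
    formatter.for_type(Point, _point_repr_pretty_)
    return True
```

Registering the formatter via `for_type` is what makes a bare object render as the structured summary instead of the default `repr` in a Jupyter session.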
2
2023-10-27 13:47:30+00:00
16k
ifrit98/storage-subnet
storage/validator/store.py
[ { "identifier": "EventSchema", "path": "storage/validator/event.py", "snippet": "class EventSchema:\n task_name: str # Task type, e.g. 'store', 'challenge', 'retrieve' 'broadcast'\n successful: List[bool] # List of whether or not the task was successful or not\n completion_times: List[float] # List of completion times for a given task\n task_status_messages: List[\n str\n ] # List of completion status messages for a given prompt\n task_status_codes: List[str] # List of completion status codes for a given prompt\n block: float # Current block at given step\n uids: List[int] # Queried uids\n step_length: float # Elapsed time between the beginning of a run step to the end of a run step\n best_uid: str # Best completion for given task\n best_hotkey: str # Best hotkey for given task\n\n # Reward data\n rewards: List[float] # Reward vector for given step\n\n # Weights data and moving averages\n set_weights: Optional[List[List[float]]] = None\n moving_averaged_scores: Optional[List[float]] = None\n\n @staticmethod\n def from_dict(event_dict: dict) -> \"EventSchema\":\n \"\"\"Converts a dictionary to an EventSchema object.\"\"\"\n\n return EventSchema(\n task_name=event_dict[\"task_name\"],\n successful=event_dict[\"successful\"],\n completion_times=event_dict[\"completion_times\"],\n task_status_messages=event_dict[\"task_status_messages\"],\n task_status_codes=event_dict[\"task_status_codes\"],\n block=event_dict[\"block\"],\n uids=event_dict[\"uids\"],\n step_length=event_dict[\"step_length\"],\n best_uid=event_dict[\"best_uid\"],\n best_hotkey=event_dict[\"best_hotkey\"],\n rewards=event_dict[\"rewards\"],\n set_weights=event_dict[\"set_weights\"],\n moving_averaged_scores=event_dict[\"moving_averaged_scores\"],\n )" }, { "identifier": "protocol", "path": "storage/protocol.py", "snippet": "class Store(bt.Synapse):\nclass StoreUser(bt.Synapse):\nclass Challenge(bt.Synapse):\nclass Retrieve(bt.Synapse):\nclass RetrieveUser(bt.Synapse):\n def __str__(self):\n def __str__(self):\n def __str__(self):" }, { "identifier": "hash_data", "path": "storage/shared/ecc.py", "snippet": "def hash_data(data):\n \"\"\"\n Compute a SHA3-256 hash of the input data and return its integer representation.\n\n The function handles both byte-like and non-byte-like inputs by converting non-byte inputs to\n strings and then encoding to bytes before hashing.\n\n Parameters:\n - data (bytes | bytearray | object): Data to be hashed.\n\n Returns:\n - int: Integer representation of the SHA3-256 hash of the input data.\n\n Raises:\n - TypeError: If the hashing operation encounters an incompatible data type.\n \"\"\"\n if not isinstance(data, (bytes, bytearray)):\n data_str = str(data)\n data = data_str.encode()\n h = hashlib.sha3_256(data).hexdigest()\n return int(h, 16)" }, { "identifier": "setup_CRS", "path": "storage/shared/ecc.py", "snippet": "def setup_CRS(curve=\"P-256\"):\n \"\"\"\n Generate a pair of random points to serve as a Common Reference String (CRS) for elliptic curve operations.\n\n The CRS is essential for various cryptographic protocols that rely on a shared reference\n between parties, typically for the purpose of ensuring consistent cryptographic operations.\n\n Parameters:\n - curve (str, optional): Name of the elliptic curve to use; defaults to \"P-256\".\n\n Returns:\n - tuple(ECC.EccPoint, ECC.EccPoint): A 2-tuple of ECC.EccPoint instances representing the base points (g, h).\n\n Raises:\n - ValueError: If the specified elliptic curve name is not recognized.\n \"\"\"\n curve_obj = 
ECC.generate(curve=curve)\n g = curve_obj.pointQ # Base point\n h = ECC.generate(curve=curve).pointQ # Another random point\n return g, h" }, { "identifier": "ecc_point_to_hex", "path": "storage/shared/ecc.py", "snippet": "def ecc_point_to_hex(point):\n \"\"\"\n Convert an elliptic curve point to a hexadecimal string.\n\n This encoding is typically used for compact representation or for preparing the data\n to be transmitted over protocols that may not support binary data.\n\n Parameters:\n - point (ECC.EccPoint): An ECC point to convert.\n\n Returns:\n - str: Hexadecimal string representing the elliptic curve point.\n\n Raises:\n - AttributeError: If the input is not a valid ECC point with accessible x and y coordinates.\n \"\"\"\n point_str = \"{},{}\".format(point.x, point.y)\n return binascii.hexlify(point_str.encode()).decode()" }, { "identifier": "b64_encode", "path": "storage/shared/utils.py", "snippet": "def b64_encode(data: Union[bytes, str, List[str], List[bytes], dict]) -> str:\n \"\"\"\n Encodes the given data into a base64 string. If the data is a list or dictionary of bytes, it converts\n the bytes into hexadecimal strings before encoding.\n\n Args:\n data (list or dict): The data to be base64 encoded. Can be a list of bytes or a dictionary with bytes values.\n\n Returns:\n str: The base64 encoded string of the input data.\n\n Raises:\n TypeError: If the input is not a list, dict, or bytes.\n \"\"\"\n if isinstance(data, bytes):\n data = data.hex()\n if isinstance(data, list) and len(data) and isinstance(data[0], bytes):\n data = [d.hex() for d in data]\n if isinstance(data, dict) and isinstance(data[list(data.keys())[0]], bytes):\n data = {k: v.hex() for k, v in data.items()}\n return base64.b64encode(json.dumps(data).encode()).decode(\"utf-8\")" }, { "identifier": "make_random_file", "path": "storage/validator/utils.py", "snippet": "def make_random_file(name: str = None, maxsize: int = None) -> Union[bytes, str]:\n \"\"\"\n Creates a file with random binary data or returns a bytes object with random data if no name is provided.\n\n Args:\n name (str, optional): The name of the file to create. If None, the function returns the random data instead.\n maxsize (int): The maximum size of the file or bytes object to be created, in bytes. Defaults to 1024.\n\n Returns:\n bytes: If 'name' is not provided, returns a bytes object containing random data.\n None: If 'name' is provided, a file is created and returns the filepath stored.\n\n Raises:\n OSError: If the function encounters an error while writing to the file.\n \"\"\"\n size = (\n random.randint(random.randint(24, 128), maxsize)\n if maxsize != None\n else generate_file_size_with_lognormal()\n )\n data = os.urandom(size)\n if isinstance(name, str):\n with open(name, \"wb\") as fout:\n fout.write(data)\n return name # Return filepath of saved data\n else:\n return data # Return the data itself" }, { "identifier": "compute_chunk_distribution_mut_exclusive_numpy_reuse_uids", "path": "storage/validator/utils.py", "snippet": "async def compute_chunk_distribution_mut_exclusive_numpy_reuse_uids(\n self, data_size, R, k, chunk_size=None, exclude=None\n):\n \"\"\"\n Asynchronously computes a distribution of data chunks across a set of unique identifiers (UIDs),\n taking into account redundancy and chunk size optimization. 
This function is useful for distributing\n data across a network of nodes or miners in a way that ensures redundancy and optimal utilization.\n\n Parameters:\n self: Reference to the class instance from which this method is called.\n data_size (int): The total size of the data to be distributed, in bytes.\n R (int): Redundancy factor, denoting the number of times each chunk should be replicated.\n k (int): The number of unique identifiers (UIDs) to be involved in the distribution.\n chunk_size (int, optional): The size of each data chunk. If not provided, an optimal chunk size\n is calculated based on the data size and the number of UIDs.\n\n Yields:\n dict: A dictionary representing a chunk's metadata, including its size, start index, end index,\n the UIDs assigned to it, and its index in the chunk sequence.\n\n Raises:\n ValueError: If the redundancy factor R is greater than the number of available UIDs.\n\n Note:\n - This function is designed to be used in distributed storage or processing systems where\n data needs to be split and stored across multiple nodes with redundancy.\n - It evenly divides the data into chunks and assigns UIDs to each chunk while ensuring that\n the redundancy requirements are met.\n \"\"\"\n\n available_uids = await get_available_query_miners(self, k=k, exclude=exclude)\n chunk_size = chunk_size or optimal_chunk_size(data_size, len(available_uids), R)\n available_uids = adjust_uids_to_multiple(available_uids, R)\n chunk_indices = calculate_chunk_indices(data_size, chunk_size)\n\n if R > len(available_uids):\n raise ValueError(\n \"Redundancy factor cannot be greater than the number of available UIDs.\"\n )\n\n # Create initial UID groups\n initial_uid_groups = partition_uids(available_uids, R)\n uid_groups = list(initial_uid_groups)\n\n # If more groups are needed, start reusing UIDs\n total_chunks_needed = data_size // chunk_size\n while len(uid_groups) < total_chunks_needed:\n for group in cycle(initial_uid_groups):\n if len(uid_groups) >= total_chunks_needed:\n break\n uid_groups.append(group)\n\n for i, ((start, end), uid_group) in enumerate(zip(chunk_indices, uid_groups)):\n yield {\n \"chunk_size\": chunk_size,\n \"start_idx\": start,\n \"end_idx\": end,\n \"uids\": uid_group,\n \"chunk_index\": i,\n }" }, { "identifier": "encrypt_data", "path": "storage/validator/encryption.py", "snippet": "NACL_SALT = b\"\\x13q\\x83\\xdf\\xf1Z\\t\\xbc\\x9c\\x90\\xb5Q\\x879\\xe9\\xb1\"\ndef encrypt_aes(filename: typing.Union[bytes, str], key: bytes) -> bytes:\ndef decrypt_aes(cipher_text: bytes, key: bytes, nonce: bytes, tag: bytes) -> bytes:\ndef encrypt_data_with_wallet(data: bytes, wallet) -> bytes:\ndef decrypt_data_with_coldkey_private_key(\n encrypted_data: bytes, private_key: typing.Union[str, bytes]\n) -> bytes:\ndef decrypt_data_with_wallet(encrypted_data: bytes, wallet) -> bytes:\ndef encrypt_data_with_aes_and_serialize(\n data: bytes, wallet: bt.wallet\n) -> typing.Tuple[bytes, bytes]:\ndef decrypt_data_and_deserialize(\n encrypted_data: bytes, encryption_payload: bytes, wallet: bt.wallet\n) -> bytes:\ndef decrypt_data_and_deserialize_with_coldkey_private_key(\n encrypted_data: bytes,\n encryption_payload: bytes,\n private_key: typing.Union[str, bytes],\n) -> bytes:\ndef serialize_nacl_encrypted_message(encrypted_message: EncryptedMessage) -> str:\ndef deserialize_nacl_encrypted_message(serialized_data: str) -> EncryptedMessage:\ndef setup_encryption_wallet(\n wallet_name=\"encryption\",\n wallet_hotkey=\"encryption\",\n password=\"dummy_password\",\n 
n_words=12,\n use_encryption=False,\n overwrite=False,\n):" }, { "identifier": "verify_store_with_seed", "path": "storage/validator/verify.py", "snippet": "def verify_store_with_seed(synapse, b64_encrypted_data, seed, verbose=False):\n \"\"\"\n Verifies the storing process in a decentralized network using the provided synapse and seed.\n This function decodes the data, reconstructs the hash using the seed, and verifies it against the commitment hash.\n It also opens the commitment to validate the process.\n Args:\n synapse (Synapse): The synapse object containing store process details.\n verbose (bool, optional): Enables verbose logging for debugging. Defaults to False.\n Returns:\n bool: True if the storing process is verified successfully, False otherwise.\n \"\"\"\n try:\n encrypted_data = base64.b64decode(b64_encrypted_data)\n except Exception as e:\n bt.logging.error(f\"Could not decode store data with error: {e}\")\n return False\n\n seed_value = str(seed).encode()\n reconstructed_hash = hash_data(encrypted_data + seed_value)\n\n # e.g. send synapse.commitment_hash as an int for consistency\n if synapse.commitment_hash != str(reconstructed_hash):\n if verbose:\n bt.logging.error(f\"Initial commitment hash != hash(data + seed)\")\n bt.logging.error(f\"commitment hash : {synapse.commitment_hash}\")\n bt.logging.error(f\"reconstructed hash: {reconstructed_hash}\")\n bt.logging.error(f\"synapse : {synapse.dendrite.dict()}\")\n return False\n\n committer = ECCommitment(\n hex_to_ecc_point(synapse.g, synapse.curve),\n hex_to_ecc_point(synapse.h, synapse.curve),\n )\n commitment = hex_to_ecc_point(synapse.commitment, synapse.curve)\n\n if not committer.open(\n commitment,\n hash_data(encrypted_data + str(seed).encode()),\n synapse.randomness,\n ):\n bt.logging.error(f\"Opening commitment failed\")\n bt.logging.error(f\"synapse: {synapse.dendrite.dict()}\")\n return False\n\n return True" }, { "identifier": "apply_reward_scores", "path": "storage/validator/reward.py", "snippet": "def apply_reward_scores(\n self, uids, responses, rewards, timeout: float, mode: str = \"sigmoid\"\n):\n \"\"\"\n Adjusts the moving average scores for a set of UIDs based on their response times and reward values.\n\n This should reflect the distribution of axon response times (minmax norm)\n\n Parameters:\n uids (List[int]): A list of UIDs for which rewards are being applied.\n responses (List[Response]): A list of response objects received from the nodes.\n rewards (torch.FloatTensor): A tensor containing the computed reward values.\n \"\"\"\n if mode not in [\"sigmoid\", \"minmax\"]:\n raise ValueError(f\"Invalid mode: {mode}\")\n\n if self.config.neuron.verbose:\n bt.logging.debug(f\"Applying rewards: {rewards}\")\n bt.logging.debug(f\"Reward shape: {rewards.shape}\")\n bt.logging.debug(f\"UIDs: {uids}\")\n\n scaled_rewards = scale_rewards(uids, responses, rewards, timeout=timeout, mode=mode)\n bt.logging.debug(f\"apply_reward_scores() Scaled rewards: {scaled_rewards}\")\n\n # Compute forward pass rewards\n # shape: [ metagraph.n ]\n scattered_rewards: torch.FloatTensor = self.moving_averaged_scores.scatter(\n 0, torch.tensor(uids).to(self.device), scaled_rewards\n ).to(self.device)\n bt.logging.trace(f\"Scattered rewards: {scattered_rewards}\")\n\n # Update moving_averaged_scores with rewards produced by this step.\n # shape: [ metagraph.n ]\n alpha: float = self.config.neuron.moving_average_alpha\n self.moving_averaged_scores: torch.FloatTensor = alpha * scattered_rewards + (\n 1 - alpha\n ) * 
self.moving_averaged_scores.to(self.device)\n bt.logging.trace(f\"Updated moving avg scores: {self.moving_averaged_scores}\")" }, { "identifier": "add_metadata_to_hotkey", "path": "storage/validator/database.py", "snippet": "async def add_metadata_to_hotkey(\n ss58_address: str, data_hash: str, metadata: Dict, database: aioredis.Redis\n):\n \"\"\"\n Associates a data hash and its metadata with a hotkey in Redis.\n\n Parameters:\n ss58_address (str): The primary key representing the hotkey.\n data_hash (str): The subkey representing the data hash.\n metadata (dict): The metadata to associate with the data hash. Includes the size of the data, the seed,\n and the encryption payload. E.g. {'size': 123, 'seed': 456, 'encryption_payload': 'abc'}.\n database (aioredis.Redis): The Redis client instance.\n \"\"\"\n # Serialize the metadata as a JSON string\n metadata_json = json.dumps(metadata)\n # Use HSET to associate the data hash with the hotkey\n key = f\"hotkey:{ss58_address}\"\n await database.hset(key, data_hash, metadata_json)\n bt.logging.trace(f\"Associated data hash {data_hash} with hotkey {ss58_address}.\")" }, { "identifier": "store_chunk_metadata", "path": "storage/validator/database.py", "snippet": "async def store_chunk_metadata(\n full_hash: str,\n chunk_hash: str,\n hotkeys: List[str],\n chunk_size: int,\n database: aioredis.Redis,\n):\n \"\"\"\n Store metadata for a specific file chunk.\n\n This function creates or updates the metadata for a chunk, including the associated hotkeys and chunk size.\n\n Parameters:\n - full_hash (str): The full hash of the file that the chunk belongs to.\n - chunk_hash (str): The hash of the chunk whose metadata is to be stored.\n - hotkeys (List[str]): A list of hotkeys associated with the chunk.\n - chunk_size (int): The size of the chunk in bytes.\n - database (aioredis.Redis): An instance of the Redis database.\n \"\"\"\n chunk_metadata_key = f\"chunk:{chunk_hash}\"\n existing_metadata = await database.hget(chunk_metadata_key, \"hotkeys\")\n if existing_metadata:\n existing_hotkeys = existing_metadata.decode().split(\",\")\n hotkeys = set(existing_hotkeys + hotkeys)\n metadata = {\"hotkeys\": \",\".join(hotkeys), \"size\": chunk_size}\n\n await database.hmset(chunk_metadata_key, metadata)" }, { "identifier": "store_file_chunk_mapping_ordered", "path": "storage/validator/database.py", "snippet": "async def store_file_chunk_mapping_ordered(\n full_hash: str,\n chunk_hashes: List[str],\n chunk_indices: List[str],\n database: aioredis.Redis,\n encryption_payload: Optional[Union[bytes, dict]] = None,\n):\n \"\"\"\n Store an ordered mapping of file chunks in the database.\n\n This function takes a file's full hash and the hashes of its individual chunks, along with their\n respective indices, and stores them in a sorted set in the Redis database. 
The order is preserved\n based on the chunk index.\n\n Parameters:\n - full_hash (str): The full hash of the file.\n - chunk_hashes (List[str]): A list of hashes for the individual chunks of the file.\n - chunk_indices (List[int]): A list of indices corresponding to each chunk hash.\n - database (aioredis.Redis): An instance of the Redis database.\n - encryption_payload (Optional[Union[bytes, dict]]): The encryption payload to store with the file.\n \"\"\"\n key = f\"file:{full_hash}\"\n for chunk_index, chunk_hash in zip(chunk_indices, chunk_hashes):\n await database.zadd(key, {chunk_hash: chunk_index})\n\n # Store the encryption payload if provided\n if encryption_payload:\n if isinstance(encryption_payload, dict):\n encryption_payload = json.dumps(encryption_payload)\n await database.set(f\"payload:{full_hash}\", encryption_payload)" }, { "identifier": "get_ordered_metadata", "path": "storage/validator/database.py", "snippet": "async def get_ordered_metadata(\n file_hash: str, database: aioredis.Redis\n) -> List[Dict[str, Union[str, List[str], int]]]:\n \"\"\"\n Retrieve the metadata for all chunks of a file in the order of their indices.\n\n This function calls `get_all_chunks_for_file` to fetch all chunks' metadata and then sorts\n them based on their indices to maintain the original file order.\n\n Parameters:\n - file_hash (str): The full hash of the file whose ordered metadata is to be retrieved.\n - database (aioredis.Redis): An instance of the Redis database.\n\n Returns:\n - List[dict]: A list of metadata dictionaries for each chunk, ordered by their chunk index.\n Returns None if no chunks are found.\n \"\"\"\n chunks_info = await get_all_chunks_for_file(file_hash, database)\n if chunks_info is None:\n return None\n\n ordered_chunks = sorted(chunks_info.items(), key=lambda x: x[0])\n return [chunk_info for _, chunk_info in ordered_chunks]" }, { "identifier": "hotkey_at_capacity", "path": "storage/validator/database.py", "snippet": "async def hotkey_at_capacity(\n hotkey: str, database: aioredis.Redis, verbose: bool = False\n) -> bool:\n \"\"\"\n Checks if the hotkey is at capacity.\n\n Parameters:\n database (aioredis.Redis): The Redis client instance.\n hotkey (str): The key representing the hotkey.\n\n Returns:\n True if the hotkey is at capacity, False otherwise.\n \"\"\"\n # Get the total storage used by the hotkey\n total_storage = await total_hotkey_storage(hotkey, database, verbose)\n # Check if the hotkey is at capacity\n byte_limit = await database.hget(f\"stats:{hotkey}\", \"storage_limit\")\n if byte_limit is None:\n if verbose:\n bt.logging.trace(f\"Could not find storage limit for {hotkey}.\")\n return False\n try:\n limit = int(byte_limit)\n except Exception as e:\n if verbose:\n bt.logging.trace(f\"Could not parse storage limit for {hotkey} | {e}.\")\n return False\n if total_storage >= limit:\n if verbose:\n bt.logging.trace(\n f\"Hotkey {hotkey} is at max capacity {limit // 1024**3} GB.\"\n )\n return True\n else:\n if verbose:\n bt.logging.trace(\n f\"Hotkey {hotkey} has {(limit - total_storage) // 1024**3} GB free.\"\n )\n return False" }, { "identifier": "update_statistics", "path": "storage/validator/bonding.py", "snippet": "async def update_statistics(\n ss58_address: str, success: bool, task_type: str, database: aioredis.Redis\n):\n \"\"\"\n Updates the statistics of a miner in the decentralized storage system.\n If the miner is not already registered, they are registered first. 
This function updates\n the miner's statistics based on the task performed (store, challenge, retrieve) and whether\n it was successful.\n Args:\n ss58_address (str): The unique address (hotkey) of the miner.\n success (bool): Indicates whether the task was successful or not.\n task_type (str): The type of task performed ('store', 'challenge', 'retrieve').\n database (redis.Redis): The Redis client instance for database operations.\n \"\"\"\n # Check and see if this miner is registered.\n if not await miner_is_registered(ss58_address, database):\n bt.logging.debug(f\"Registering new miner {ss58_address}...\")\n await register_miner(ss58_address, database)\n\n # Update statistics in the stats hash\n stats_key = f\"stats:{ss58_address}\"\n\n if task_type in [\"store\", \"challenge\", \"retrieve\"]:\n await database.hincrby(stats_key, f\"{task_type}_attempts\", 1)\n if success:\n await database.hincrby(stats_key, f\"{task_type}_successes\", 1)\n\n # Transition retireval -> retrieve successes (legacy)\n legacy_retrieve_successes = await database.hget(stats_key, \"retrieval_successes\")\n if legacy_retrieve_successes != None:\n await database.hset(\n stats_key, \"retrieve_successes\", int(legacy_retrieve_successes)\n )\n await database.hdel(stats_key, \"retrieval_successes\")\n\n # Transition retireval -> retrieve attempts (legacy)\n legacy_retrieve_attempts = await database.hget(stats_key, \"retrieval_attempts\")\n if legacy_retrieve_attempts != None:\n await database.hset(\n stats_key, \"retrieve_attempts\", int(legacy_retrieve_attempts)\n )\n await database.hdel(stats_key, \"retrieval_attempts\")\n\n # Update the total successes that we rollover every epoch\n if await database.hget(stats_key, \"total_successes\") == None:\n store_successes = int(await database.hget(stats_key, \"store_successes\"))\n challenge_successes = int(await database.hget(stats_key, \"challenge_successes\"))\n retrieval_successes = int(await database.hget(stats_key, \"retrieve_successes\"))\n total_successes = store_successes + retrieval_successes + challenge_successes\n await database.hset(stats_key, \"total_successes\", total_successes)\n if success:\n await database.hincrby(stats_key, \"total_successes\", 1)" }, { "identifier": "create_reward_vector", "path": "storage/validator/reward.py", "snippet": "async def create_reward_vector(\n self,\n synapse: Union[Store, Retrieve, Challenge],\n rewards: torch.FloatTensor,\n uids: List[int],\n responses: List[Synapse],\n event: EventSchema,\n callback: callable,\n fail_callback: callable,\n):\n # Determine if the commitment is valid\n success = False\n if isinstance(synapse, Store):\n verify_fn = partial(\n verify_store_with_seed,\n b64_encrypted_data=synapse.encrypted_data,\n seed=synapse.seed,\n )\n task_type = \"store\"\n failure_reward = STORE_FAILURE_REWARD\n elif isinstance(synapse, Retrieve):\n verify_fn = partial(verify_retrieve_with_seed, seed=synapse.seed)\n task_type = \"retrieve\"\n failure_reward = RETRIEVAL_FAILURE_REWARD\n elif isinstance(synapse, Challenge):\n verify_fn = partial(verify_challenge_with_seed, seed=synapse.seed)\n task_type = \"challenge\"\n failure_reward = CHALLENGE_FAILURE_REWARD\n else:\n raise ValueError(f\"Invalid synapse type: {type(synapse)}\")\n\n for idx, (uid, response) in enumerate(zip(uids, responses)):\n # Verify the commitment\n hotkey = self.metagraph.hotkeys[uid]\n\n # Determine if the commitment is valid\n success = verify_fn(synapse=response)\n if success:\n bt.logging.debug(\n f\"Successfully verified {synapse.__class__} 
commitment from UID: {uid} | hotkey: {hotkey}\"\n )\n await callback(hotkey, idx, uid, response)\n else:\n bt.logging.error(\n f\"create_reward_vector() Failed to verify store commitment from UID: {uid} | hotkey: {hotkey}\"\n )\n fail_callback(uid)\n\n # Update the storage statistics\n await update_statistics(\n ss58_address=hotkey,\n success=success,\n task_type=task_type,\n database=self.database,\n )\n\n # Apply reward for this task\n tier_factor = await get_tier_factor(hotkey, self.database)\n rewards[idx] = 1.0 * tier_factor if success else failure_reward * tier_factor\n\n event.successful.append(success)\n event.uids.append(uid)\n event.completion_times.append(response.dendrite.process_time)\n event.task_status_messages.append(response.dendrite.status_message)\n event.task_status_codes.append(response.dendrite.status_code)" }, { "identifier": "ping_and_retry_uids", "path": "storage/validator/network.py", "snippet": "async def ping_and_retry_uids(\n self, k: int = None, max_retries: int = 3, exclude_uids: typing.List[int] = []\n):\n \"\"\"\n Fetch available uids to minimize waiting for timeouts if they're going to fail anyways...\n \"\"\"\n # Select initial subset of miners to query\n uids = await get_available_query_miners(\n self, k=k or self.config.neuron.store_redundancy, exclude=exclude_uids\n )\n bt.logging.debug(\"initial ping_and_retry() uids:\", uids)\n\n retries = 0\n successful_uids = set()\n failed_uids = set()\n while len(successful_uids) < k and retries < max_retries:\n # Ping all UIDs\n current_successful_uids, current_failed_uids = await ping_uids(self, uids)\n successful_uids.update(current_successful_uids)\n failed_uids.update(current_failed_uids)\n\n # If enough UIDs are successful, select the first k items\n if len(successful_uids) >= k:\n uids = list(successful_uids)[:k]\n break\n\n # Reroll for k UIDs excluding the successful ones\n new_uids = await get_available_query_miners(\n self, k=k, exclude=list(successful_uids.union(failed_uids))\n )\n bt.logging.debug(f\"ping_and_retry() new uids: {new_uids}\")\n retries += 1\n\n # Log if the maximum retries are reached without enough successful UIDs\n if len(successful_uids) < k:\n bt.logging.warning(\n f\"Insufficient successful UIDs for k: {k} Success UIDs {successful_uids} Failed UIDs: {failed_uids}\"\n )\n\n return list(successful_uids)[:k], failed_uids" }, { "identifier": "compute_and_ping_chunks", "path": "storage/validator/network.py", "snippet": "async def compute_and_ping_chunks(self, distributions):\n \"\"\"\n Asynchronously evaluates the availability of miners for the given chunk distributions by pinging them.\n Rerolls the distribution to replace failed miners, ensuring exactly k successful miners are selected.\n\n Parameters:\n distributions (list of dicts): A list of chunk distribution dictionaries, each containing\n information about chunk indices and assigned miner UIDs.\n\n Returns:\n list of dicts: The updated list of chunk distributions with exactly k successful miner UIDs.\n\n Note:\n - This function is crucial for ensuring that data chunks are assigned to available and responsive miners.\n - Pings miners based on their UIDs and updates the distributions accordingly.\n - Logs the new set of UIDs and distributions for traceability.\n \"\"\"\n max_retries = 3 # Define the maximum number of retries\n target_number_of_uids = len(\n distributions[0][\"uids\"]\n ) # Assuming k is the length of the uids in the first distribution\n\n for dist in distributions:\n retries = 0\n successful_uids = set()\n\n 
while len(successful_uids) < target_number_of_uids and retries < max_retries:\n # Ping all UIDs\n current_successful_uids, _ = await ping_uids(self, dist[\"uids\"])\n successful_uids.update(current_successful_uids)\n\n # If enough UIDs are successful, select the first k items\n if len(successful_uids) >= target_number_of_uids:\n dist[\"uids\"] = tuple(sorted(successful_uids)[:target_number_of_uids])\n break\n\n # Reroll for k UIDs excluding the successful ones\n new_uids = await get_available_query_miners(\n self, k=target_number_of_uids, exclude=successful_uids\n )\n bt.logging.trace(\"compute_and_ping_chunks() new uids:\", new_uids)\n\n # Update the distribution with new UIDs\n dist[\"uids\"] = tuple(new_uids)\n retries += 1\n\n # Log if the maximum retries are reached without enough successful UIDs\n if len(successful_uids) < target_number_of_uids:\n bt.logging.warning(\n f\"compute_and_ping_chunks(): Insufficient successful UIDs for distribution: {dist}\"\n )\n\n # Continue with your logic using the updated distributions\n bt.logging.trace(\"new distributions:\", distributions)\n return distributions" }, { "identifier": "reroll_distribution", "path": "storage/validator/network.py", "snippet": "async def reroll_distribution(self, distribution, failed_uids):\n \"\"\"\n Asynchronously rerolls a single data chunk distribution by replacing failed miner UIDs with new, available ones.\n This is part of the error handling process in data distribution to ensure that each chunk is reliably stored.\n\n Parameters:\n distribution (dict): The original chunk distribution dictionary, containing chunk information and miner UIDs.\n failed_uids (list of int): List of UIDs that failed in the original distribution and need replacement.\n\n Returns:\n dict: The updated chunk distribution with new miner UIDs replacing the failed ones.\n\n Note:\n - This function is typically used when certain miners are unresponsive or unable to store the chunk.\n - Ensures that each chunk has the required number of active miners for redundancy.\n \"\"\"\n # Get new UIDs to replace the failed ones\n new_uids = await get_available_query_miners(\n self, k=len(failed_uids), exclude=failed_uids\n )\n distribution[\"uids\"] = new_uids\n return distribution" }, { "identifier": "compute_chunk_distribution_mut_exclusive_numpy_reuse_uids", "path": "storage/validator/utils.py", "snippet": "async def compute_chunk_distribution_mut_exclusive_numpy_reuse_uids(\n self, data_size, R, k, chunk_size=None, exclude=None\n):\n \"\"\"\n Asynchronously computes a distribution of data chunks across a set of unique identifiers (UIDs),\n taking into account redundancy and chunk size optimization. This function is useful for distributing\n data across a network of nodes or miners in a way that ensures redundancy and optimal utilization.\n\n Parameters:\n self: Reference to the class instance from which this method is called.\n data_size (int): The total size of the data to be distributed, in bytes.\n R (int): Redundancy factor, denoting the number of times each chunk should be replicated.\n k (int): The number of unique identifiers (UIDs) to be involved in the distribution.\n chunk_size (int, optional): The size of each data chunk. 
If not provided, an optimal chunk size\n is calculated based on the data size and the number of UIDs.\n\n Yields:\n dict: A dictionary representing a chunk's metadata, including its size, start index, end index,\n the UIDs assigned to it, and its index in the chunk sequence.\n\n Raises:\n ValueError: If the redundancy factor R is greater than the number of available UIDs.\n\n Note:\n - This function is designed to be used in distributed storage or processing systems where\n data needs to be split and stored across multiple nodes with redundancy.\n - It evenly divides the data into chunks and assigns UIDs to each chunk while ensuring that\n the redundancy requirements are met.\n \"\"\"\n\n available_uids = await get_available_query_miners(self, k=k, exclude=exclude)\n chunk_size = chunk_size or optimal_chunk_size(data_size, len(available_uids), R)\n available_uids = adjust_uids_to_multiple(available_uids, R)\n chunk_indices = calculate_chunk_indices(data_size, chunk_size)\n\n if R > len(available_uids):\n raise ValueError(\n \"Redundancy factor cannot be greater than the number of available UIDs.\"\n )\n\n # Create initial UID groups\n initial_uid_groups = partition_uids(available_uids, R)\n uid_groups = list(initial_uid_groups)\n\n # If more groups are needed, start reusing UIDs\n total_chunks_needed = data_size // chunk_size\n while len(uid_groups) < total_chunks_needed:\n for group in cycle(initial_uid_groups):\n if len(uid_groups) >= total_chunks_needed:\n break\n uid_groups.append(group)\n\n for i, ((start, end), uid_group) in enumerate(zip(chunk_indices, uid_groups)):\n yield {\n \"chunk_size\": chunk_size,\n \"start_idx\": start,\n \"end_idx\": end,\n \"uids\": uid_group,\n \"chunk_index\": i,\n }" } ]
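The `compute_chunk_distribution_mut_exclusive_numpy_reuse_uids` snippet above describes splitting a payload into byte-range chunks and assigning each chunk to a group of R miner UIDs, cycling over the groups when there are more chunks than groups. A minimal standalone sketch of that partitioning idea follows; the helper names and demo values are illustrative assumptions, not the subnet's actual implementation.

```python
from itertools import cycle
from typing import Iterator, List, Tuple


def calculate_chunk_indices(data_size: int, chunk_size: int) -> List[Tuple[int, int]]:
    # Split [0, data_size) into consecutive (start, end) byte ranges.
    return [(start, min(start + chunk_size, data_size)) for start in range(0, data_size, chunk_size)]


def partition_uids(uids: List[int], R: int) -> List[Tuple[int, ...]]:
    # Group UIDs into disjoint tuples of size R (the redundancy factor);
    # any remainder that cannot fill a full group is dropped in this sketch.
    return [tuple(uids[i : i + R]) for i in range(0, len(uids) - R + 1, R)]


def distribute_chunks(data_size: int, uids: List[int], R: int, chunk_size: int) -> Iterator[dict]:
    if R > len(uids):
        raise ValueError("Redundancy factor cannot be greater than the number of available UIDs.")
    indices = calculate_chunk_indices(data_size, chunk_size)
    groups = partition_uids(uids, R)
    # Reuse UID groups round-robin when there are more chunks than groups.
    for i, ((start, end), group) in enumerate(zip(indices, cycle(groups))):
        yield {"chunk_index": i, "chunk_size": chunk_size, "start_idx": start, "end_idx": end, "uids": group}


if __name__ == "__main__":
    for dist in distribute_chunks(data_size=1_000, uids=list(range(8)), R=2, chunk_size=300):
        print(dist)
```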
import os import sys import copy import time import torch import base64 import typing import asyncio import aioredis import bittensor as bt import websocket from pprint import pformat from pyinstrument import Profiler from Crypto.Random import get_random_bytes, random from dataclasses import asdict from storage.validator.event import EventSchema from storage import protocol from storage.shared.ecc import ( hash_data, setup_CRS, ecc_point_to_hex, ) from storage.shared.utils import b64_encode from storage.validator.utils import ( make_random_file, compute_chunk_distribution_mut_exclusive_numpy_reuse_uids, ) from storage.validator.encryption import encrypt_data from storage.validator.verify import verify_store_with_seed from storage.validator.reward import apply_reward_scores from storage.validator.database import ( add_metadata_to_hotkey, store_chunk_metadata, store_file_chunk_mapping_ordered, get_ordered_metadata, hotkey_at_capacity, ) from storage.validator.bonding import update_statistics from .reward import create_reward_vector from .network import ping_and_retry_uids, compute_and_ping_chunks, reroll_distribution from .utils import compute_chunk_distribution_mut_exclusive_numpy_reuse_uids
10,879
rewards=[], moving_averaged_scores=[], ) g, h = setup_CRS(curve=self.config.neuron.curve) bt.logging.debug(f"type(chunk): {type(chunk)}") bt.logging.debug(f"chunk: {chunk[:100]}") chunk = chunk.encode("utf-8") if isinstance(chunk, str) else chunk b64_encoded_chunk = await asyncio.to_thread(base64.b64encode, chunk) b64_encoded_chunk = b64_encoded_chunk.decode("utf-8") bt.logging.debug(f"b64_encoded_chunk: {b64_encoded_chunk[:100]}") random_seed = get_random_bytes(32).hex() synapse = protocol.Store( encrypted_data=b64_encoded_chunk, curve=self.config.neuron.curve, g=ecc_point_to_hex(g), h=ecc_point_to_hex(h), seed=random_seed, ) uids = [ uid for uid in uids if not await hotkey_at_capacity(self.metagraph.hotkeys[uid], self.database) ] axons = [self.metagraph.axons[uid] for uid in uids] responses = await self.dendrite( axons, synapse, deserialize=False, timeout=self.config.neuron.store_timeout, ) # Compute the rewards for the responses given proc time. rewards: torch.FloatTensor = torch.zeros( len(responses), dtype=torch.float32 ).to(self.device) async def success(hotkey, idx, uid, response): bt.logging.debug(f"Stored data in database with key: {hotkey}") failed_uids = [] def failure(uid): failed_uids.append(uid) await create_reward_vector( self, synapse, rewards, uids, responses, event, success, failure ) event.rewards.extend(rewards.tolist()) apply_reward_scores( self, uids, responses, rewards, timeout=self.config.neuron.store_timeout, mode=self.config.neuron.reward_mode, ) bt.logging.debug(f"Updated reward scores: {rewards.tolist()}") # Determine the best UID based on rewards if event.rewards: best_index = max(range(len(event.rewards)), key=event.rewards.__getitem__) event.best_uid = event.uids[best_index] event.best_hotkey = self.metagraph.hotkeys[event.best_uid] chunk_size = sys.getsizeof(chunk) # chunk size in bytes bt.logging.debug(f"chunk size: {chunk_size}") await store_chunk_metadata( full_hash, chunk_hash, [self.metagraph.hotkeys[uid] for uid in uids], chunk_size, # this should be len(chunk) but we need to fix the chunking self.database, ) return responses, b64_encoded_chunk, random_seed async def handle_uid_operations( uid, response, b64_encoded_chunk, random_seed, chunk_hash, chunk_size ): ss = time.time() start = time.time() # Offload the CPU-intensive verification to a separate thread verified = await asyncio.to_thread( verify_store_with_seed, response, b64_encoded_chunk, random_seed ) end = time.time() bt.logging.debug(f"verify_store_with_seed time for uid {uid} : {end-start}") if verified: # Prepare storage for the data for particular miner response_storage = { "prev_seed": response.seed, "size": chunk_size, "encryption_payload": encryption_payload, } start = time.time() # Store in the database according to the data hash and the miner hotkey await add_metadata_to_hotkey( self.metagraph.hotkeys[uid], chunk_hash, response_storage, # seed + size + encryption keys self.database, ) end = time.time() bt.logging.debug( f"Stored data in database for uid: {uid} | {str(chunk_hash)}" ) else: bt.logging.error(f"Failed to verify store commitment from UID: {uid}") # Update the storage statistics
# The MIT License (MIT) # Copyright © 2023 Yuma Rao # Copyright © 2023 philanthrope # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated # documentation files (the “Software”), to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all copies or substantial portions of # the Software. # THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO # THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. async def store_encrypted_data( self, encrypted_data: typing.Union[bytes, str], encryption_payload: dict, exclude_uids: typing.List[str] = [], ttl: int = 0, k: int = None, max_retries: int = 3, ) -> bool: event = EventSchema( task_name="Store", successful=[], completion_times=[], task_status_messages=[], task_status_codes=[], block=self.subtensor.get_current_block(), uids=[], step_length=0.0, best_uid="", best_hotkey="", rewards=[], moving_averaged_scores=[], ) start_time = time.time() encrypted_data = ( encrypted_data.encode("utf-8") if isinstance(encrypted_data, str) else encrypted_data ) # Setup CRS for this round of validation g, h = setup_CRS(curve=self.config.neuron.curve) # Hash the data data_hash = hash_data(encrypted_data) # Convert to base64 for compactness # TODO: Don't do this if it's already b64 encoded. (Check first) b64_encrypted_data = base64.b64encode(encrypted_data).decode("utf-8") if self.config.neuron.verbose: bt.logging.debug(f"storing user data: {encrypted_data[:12]}...") bt.logging.debug(f"storing user hash: {data_hash}") bt.logging.debug(f"b64 encrypted data: {b64_encrypted_data[:12]}...") synapse = protocol.Store( encrypted_data=b64_encrypted_data, curve=self.config.neuron.curve, g=ecc_point_to_hex(g), h=ecc_point_to_hex(h), seed=get_random_bytes(32).hex(), # 256-bit seed ) # Select subset of miners to query (e.g. redunancy factor of N) uids, _ = await ping_and_retry_uids( self, k=k or self.config.neuron.store_redundancy, max_retries=max_retries, exclude_uids=exclude_uids, ) bt.logging.debug(f"store_encrypted_data() uids: {uids}") axons = [self.metagraph.axons[uid] for uid in uids] failed_uids = [None] retries = 0 while len(failed_uids) and retries < max_retries: if failed_uids == [None]: # initial loop failed_uids = [] # Broadcast the query to selected miners on the network. responses = await self.dendrite( axons, synapse, deserialize=False, timeout=self.config.neuron.store_timeout, ) # Compute the rewards for the responses given proc time. 
rewards: torch.FloatTensor = torch.zeros( len(responses), dtype=torch.float32 ).to(self.device) async def success(hotkey, idx, uid, response): # Prepare storage for the data for particular miner response_storage = { "prev_seed": synapse.seed, "size": sys.getsizeof(encrypted_data), # in bytes, not len(data) "encryption_payload": encryption_payload, } bt.logging.trace(f"Storing UID {uid} data {pformat(response_storage)}") # Store in the database according to the data hash and the miner hotkey await add_metadata_to_hotkey( hotkey, data_hash, response_storage, self.database, ) if ttl > 0: await self.database.expire( f"{hotkey}:{data_hash}", ttl, ) bt.logging.debug( f"Stored data in database with hotkey: {hotkey} | uid {uid} | {data_hash}" ) def failure(uid): failed_uids.append(uid) await create_reward_vector( self, synapse, rewards, uids, responses, event, success, failure ) event.rewards.extend(rewards.tolist()) if self.config.neuron.verbose and self.config.neuron.log_responses: bt.logging.debug(f"Store responses round: {retries}") [ bt.logging.debug(f"Store response: {response.dendrite.dict()}") for response in responses ] bt.logging.trace(f"Applying store rewards for retry: {retries}") apply_reward_scores( self, uids, responses, rewards, timeout=self.config.neuron.store_timeout, mode=self.config.neuron.reward_mode, ) # Get a new set of UIDs to query for those left behind if failed_uids != []: bt.logging.trace(f"Failed to store on uids: {failed_uids}") uids, _ = await ping_and_retry_uids( self, k=len(failed_uids), exclude_uids=exclude_uids ) bt.logging.trace(f"Retrying with new uids: {uids}") axons = [self.metagraph.axons[uid] for uid in uids] failed_uids = [] # reset failed uids for next round retries += 1 # Calculate step length end_time = time.time() event.step_length = end_time - start_time # Determine the best UID based on rewards if event.rewards: best_index = max(range(len(event.rewards)), key=event.rewards.__getitem__) event.best_uid = event.uids[best_index] event.best_hotkey = self.metagraph.hotkeys[event.best_uid] # Update event log with moving averaged scores event.moving_averaged_scores = self.moving_averaged_scores.tolist() return event async def store_random_data(self): """ Stores data on the network and ensures it is correctly committed by the miners. Parameters: - data (bytes, optional): The data to be stored. - wallet (bt.wallet, optional): The wallet to be used for encrypting the data. Returns: - The status of the data storage operation. """ # Setup CRS for this round of validation g, h = setup_CRS(curve=self.config.neuron.curve) # Make a random bytes file to test the miner if none provided data = make_random_file(maxsize=self.config.neuron.maxsize) bt.logging.debug(f"Random store data size: {sys.getsizeof(data)}") # Encrypt the data # TODO: create and use a throwaway wallet (never decrypable) encrypted_data, encryption_payload = encrypt_data(data, self.encryption_wallet) return await store_encrypted_data( self, encrypted_data, encryption_payload, k=self.config.neuron.store_sample_size, ttl=self.config.neuron.data_ttl, ) async def store_broadband( self, encrypted_data, encryption_payload, R=3, k=10, data_hash=None, exclude_uids=None, ): """ Asynchronously stores encrypted data across a distributed network by splitting it into chunks and assigning these chunks to various miners for storage. This method ensures redundancy and efficient data distribution while handling network requests concurrently. 
The process includes chunking the data, selecting miners for storage, and verifying the integrity of stored data through response validation. Parameters: encrypted_data (bytes): The encrypted data to be stored across the network. encryption_payload (dict): Additional payload information required for encryption. R (int, optional): The redundancy factor, denoting how many times each chunk is replicated. Default is 3. k (int, optional): The number of miners to query for each chunk. Default is 10. data_hash (str, optional): The hash of the data to be stored. If not provided, compute it. Default is None. exclude_uids: (list of int, optional): A list of UIDs to exclude from the storage process. Default is None. Returns: str: The hash of the full data, representing its unique identifier in the network. Raises: Exception: If the process of creating initial distributions fails after multiple retries. Note: - Uses a semaphore to limit the number of concurrent network requests. - Employs a retry mechanism for handling network and miner availability issues. - Logs various stages of the process for debugging and monitoring purposes. """ if self.config.neuron.profile: # Create a profiler instance profiler = Profiler() profiler.start() semaphore = asyncio.Semaphore(self.config.neuron.semaphore_size) async def store_chunk_group(chunk_hash, chunk, uids): event = EventSchema( task_name="Store", successful=[], completion_times=[], task_status_messages=[], task_status_codes=[], block=self.subtensor.get_current_block(), uids=[], step_length=0.0, best_uid="", best_hotkey="", rewards=[], moving_averaged_scores=[], ) g, h = setup_CRS(curve=self.config.neuron.curve) bt.logging.debug(f"type(chunk): {type(chunk)}") bt.logging.debug(f"chunk: {chunk[:100]}") chunk = chunk.encode("utf-8") if isinstance(chunk, str) else chunk b64_encoded_chunk = await asyncio.to_thread(base64.b64encode, chunk) b64_encoded_chunk = b64_encoded_chunk.decode("utf-8") bt.logging.debug(f"b64_encoded_chunk: {b64_encoded_chunk[:100]}") random_seed = get_random_bytes(32).hex() synapse = protocol.Store( encrypted_data=b64_encoded_chunk, curve=self.config.neuron.curve, g=ecc_point_to_hex(g), h=ecc_point_to_hex(h), seed=random_seed, ) uids = [ uid for uid in uids if not await hotkey_at_capacity(self.metagraph.hotkeys[uid], self.database) ] axons = [self.metagraph.axons[uid] for uid in uids] responses = await self.dendrite( axons, synapse, deserialize=False, timeout=self.config.neuron.store_timeout, ) # Compute the rewards for the responses given proc time. 
rewards: torch.FloatTensor = torch.zeros( len(responses), dtype=torch.float32 ).to(self.device) async def success(hotkey, idx, uid, response): bt.logging.debug(f"Stored data in database with key: {hotkey}") failed_uids = [] def failure(uid): failed_uids.append(uid) await create_reward_vector( self, synapse, rewards, uids, responses, event, success, failure ) event.rewards.extend(rewards.tolist()) apply_reward_scores( self, uids, responses, rewards, timeout=self.config.neuron.store_timeout, mode=self.config.neuron.reward_mode, ) bt.logging.debug(f"Updated reward scores: {rewards.tolist()}") # Determine the best UID based on rewards if event.rewards: best_index = max(range(len(event.rewards)), key=event.rewards.__getitem__) event.best_uid = event.uids[best_index] event.best_hotkey = self.metagraph.hotkeys[event.best_uid] chunk_size = sys.getsizeof(chunk) # chunk size in bytes bt.logging.debug(f"chunk size: {chunk_size}") await store_chunk_metadata( full_hash, chunk_hash, [self.metagraph.hotkeys[uid] for uid in uids], chunk_size, # this should be len(chunk) but we need to fix the chunking self.database, ) return responses, b64_encoded_chunk, random_seed async def handle_uid_operations( uid, response, b64_encoded_chunk, random_seed, chunk_hash, chunk_size ): ss = time.time() start = time.time() # Offload the CPU-intensive verification to a separate thread verified = await asyncio.to_thread( verify_store_with_seed, response, b64_encoded_chunk, random_seed ) end = time.time() bt.logging.debug(f"verify_store_with_seed time for uid {uid} : {end-start}") if verified: # Prepare storage for the data for particular miner response_storage = { "prev_seed": response.seed, "size": chunk_size, "encryption_payload": encryption_payload, } start = time.time() # Store in the database according to the data hash and the miner hotkey await add_metadata_to_hotkey( self.metagraph.hotkeys[uid], chunk_hash, response_storage, # seed + size + encryption keys self.database, ) end = time.time() bt.logging.debug( f"Stored data in database for uid: {uid} | {str(chunk_hash)}" ) else: bt.logging.error(f"Failed to verify store commitment from UID: {uid}") # Update the storage statistics
await update_statistics(
16
2023-10-26 18:54:47+00:00
16k
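The first record's context above (`ping_and_retry_uids`, `compute_and_ping_chunks`) revolves around one pattern: ping candidate UIDs, keep the responsive ones, and reroll replacements until k successes or a retry limit is reached. A generic, self-contained sketch of that loop is below; `flaky_ping` and the candidate pool are hypothetical stand-ins for the subnet's async network helpers.

```python
import asyncio
import random
from typing import Awaitable, Callable, List, Set, Tuple


async def ping_and_retry(
    candidates: List[int],
    k: int,
    ping: Callable[[int], Awaitable[bool]],
    max_retries: int = 3,
) -> Tuple[List[int], Set[int]]:
    """Ping ids until k are responsive or the retry budget is exhausted."""
    successful: Set[int] = set()
    failed: Set[int] = set()
    pool = list(candidates)
    retries = 0
    while len(successful) < k and retries < max_retries:
        results = await asyncio.gather(*(ping(uid) for uid in pool))
        for uid, ok in zip(pool, results):
            (successful if ok else failed).add(uid)
        if len(successful) >= k:
            break
        # Reroll: only retry candidates we have not classified yet.
        pool = [uid for uid in candidates if uid not in successful | failed]
        retries += 1
    return list(successful)[:k], failed


async def _demo() -> None:
    async def flaky_ping(uid: int) -> bool:  # hypothetical stand-in for a real network ping
        await asyncio.sleep(0)
        return random.random() > 0.3

    ok, bad = await ping_and_retry(list(range(10)), k=4, ping=flaky_ping)
    print("successful:", ok, "failed:", bad)


if __name__ == "__main__":
    asyncio.run(_demo())
```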
Eclectic-Sheep/sheeprlhf
sheeprlhf/task/train/ppo.py
[ { "identifier": "PPOAgent", "path": "sheeprlhf/agent/ppo.py", "snippet": "class PPOAgent:\n \"\"\"Agent model for PPO training.\"\"\"\n\n _reference: ActorModel\n _reward: RewardModel\n _finetune_mode: FINETUNE_MODE\n _actor: Optional[ActorModel] = None\n _critic: Optional[CriticModel] = None\n _same_actor_critic: bool = False\n _share_actor_critic: bool = False\n _share_critic_reward: bool = False\n\n _sft_checkpoint_path: str\n _sft_model_cfg: ModelConfig\n _rm_checkpoint_path: str\n _rm_model_cfg: ModelConfig\n\n _lora_enabled: bool\n _init_critic_with_reward: bool\n\n def __init__(self, model_cfg: ModelConfig, task_cfg: PPOConfig) -> None:\n self.model_cfg = model_cfg\n self._init_critic_with_reward = task_cfg.init_critic_with_reward\n\n self._sft_model_cfg, self._sft_checkpoint_path = get_model_checkpoint(\n task_cfg.sft_experiment_dir, task_cfg.sft_model_name\n )\n sft_model_name = self._sft_model_cfg.repo_name\n\n self._rm_model_cfg, self._rm_checkpoint_path = get_model_checkpoint(\n task_cfg.rm_experiment_dir, task_cfg.sft_model_name\n )\n rm_model_name = self._rm_model_cfg.repo_name\n\n self._reference = ActorModel(model_cfg=self._sft_model_cfg)\n self._reward = RewardModel(model_cfg=self._rm_model_cfg)\n\n self._same_actor_critic = sft_model_name == rm_model_name\n self._finetune_mode = model_cfg.finetune_mode\n self._lora_enabled = self._finetune_mode == FINETUNE_MODE.LORA\n if not self._init_critic_with_reward:\n if not (self._lora_enabled and self._same_actor_critic):\n # Actor and critic cannot be shared, we fallback to the default behavior\n self._actor = ActorModel(model_cfg=self._sft_model_cfg)\n self._critic = CriticModel(model_cfg=self._sft_model_cfg)\n else:\n self._share_actor_critic = True\n\n else:\n if not self._lora_enabled:\n self._actor = ActorModel(model_cfg=self._sft_model_cfg)\n self._critic = CriticModel(model_cfg=self._rm_model_cfg)\n else:\n self._share_critic_reward = True\n\n def load_checkpoint(self, device: torch.device) -> None:\n \"\"\"Load checkpoints for Actor, Critic and Reward models.\"\"\"\n self._reference.load_checkpoint(\n path=self._sft_checkpoint_path, device=device, model_cfg=self._sft_model_cfg, freeze=True\n )\n self._reward.load_checkpoint(\n path=self._rm_checkpoint_path, device=device, model_cfg=self._rm_model_cfg, freeze=True\n )\n if not self._init_critic_with_reward:\n if not (self._lora_enabled and self._same_actor_critic):\n # Actor and critic cannot be shared, we fallback to the default behavior\n self._actor.load_checkpoint(\n path=self._sft_checkpoint_path, device=device, model_cfg=self._sft_model_cfg, freeze=True\n )\n self._critic.load_checkpoint(\n path=self._sft_checkpoint_path, device=device, model_cfg=self._sft_model_cfg, freeze=True\n )\n else:\n if not self._lora_enabled:\n self._critic.load_checkpoint(\n path=self._rm_checkpoint_path, device=device, model_cfg=self._rm_model_cfg, freeze=True\n )\n self._actor.load_checkpoint(\n path=self._sft_checkpoint_path, device=device, model_cfg=self._sft_model_cfg, freeze=True\n )\n\n def setup_finetuning(self, model_cfg: Optional[ModelConfig] = None) -> None:\n \"\"\"Setup finetuning for Actor, Critic and Reward models.\"\"\"\n if model_cfg is None:\n model_cfg = self.model_cfg\n lora_cfg = self.model_cfg.lora_cfg\n if not self._init_critic_with_reward:\n if self._lora_enabled and self._same_actor_critic:\n # here we can share reference model between Actor and Critic\n add_multiple_lora(self._reference, lora_cfg=lora_cfg, num=2)\n else:\n # Actor and critic cannot be shared, we 
fallback to the default behavior\n self._actor.setup_finetuning(model_cfg=model_cfg)\n self._critic.setup_finetuning(model_cfg=model_cfg)\n else:\n if self._lora_enabled:\n add_lora(self._reward, lora_cfg=lora_cfg)\n add_lora(self._reference, lora_cfg=lora_cfg)\n else:\n self._critic.setup_finetuning(model_cfg=model_cfg)\n self._actor.setup_finetuning(model_cfg=model_cfg)\n trainable_parameter_summary(self.actor, show_names=False, tag=\"Actor\")\n trainable_parameter_summary(self.critic, show_names=False, tag=\"Critic\")\n\n @property\n def share_actor_critic(self) -> bool:\n \"\"\"Whether Actor and Critic models are shared.\"\"\"\n return self._share_actor_critic\n\n @property\n def share_critic_reward(self) -> bool:\n \"\"\"Whether Critic and Reward models are shared.\"\"\"\n return self._share_critic_reward\n\n @property\n def lora_enabled(self) -> bool:\n \"\"\"Whether LoRA is enabled.\"\"\"\n return self._lora_enabled\n\n @property\n def actor(self) -> ActorModel: # noqa: D102\n if self._share_actor_critic:\n enable_lora(self._reference)\n return select_lora(self._reference, index=0)\n elif self._lora_enabled and self._init_critic_with_reward:\n enable_lora(self._reference)\n return self._reference\n else:\n return self._actor\n\n @actor.setter\n def actor(self, actor: ActorModel) -> None:\n if self._lora_enabled and (self._share_actor_critic or self._init_critic_with_reward):\n self._reference = actor\n else:\n self._actor = actor\n\n @property\n def critic(self) -> CriticModel: # noqa: D102\n if self._share_actor_critic:\n enable_lora(self._reference)\n return select_lora(self._reference, index=1)\n elif self._share_critic_reward:\n enable_lora(self._reward)\n self._reward.disable_bias_gain()\n return self._reward\n else:\n return self._critic\n\n @critic.setter\n def critic(self, critic: CriticModel) -> None:\n if self._share_actor_critic:\n self._reference = critic\n elif self._share_critic_reward:\n self._reward = critic\n else:\n self._critic = critic\n\n @property\n def reference(self) -> ActorModel: # noqa: D102\n if self._share_actor_critic and self._lora_enabled:\n disable_lora(self._reference)\n\n return self._reference\n\n @reference.setter\n def reference(self, reference: ActorModel) -> None:\n self._reference = reference\n\n @property\n def reward(self) -> RewardModel: # noqa: D102\n if self._share_critic_reward:\n disable_lora(self._reward)\n self._reward.enable_bias_gain()\n return self._reward\n\n @reward.setter\n def reward(self, reward: RewardModel) -> None:\n self._reward = reward" }, { "identifier": "TextDataset", "path": "sheeprlhf/data/base.py", "snippet": "class TextDataset(torch.utils.data.Dataset):\n \"\"\"A simple text dataset for loading data from a pandas dataframe.\"\"\"\n\n def __init__(self, dataframe_path: str):\n self.dataframe = pd.read_pickle(dataframe_path).reset_index(drop=True)\n\n def __getitem__(self, index):\n row = self.dataframe.iloc[index].to_dict()\n return row\n\n def __len__(self):\n return len(self.dataframe)" }, { "identifier": "LeftPadCollate", "path": "sheeprlhf/data/collate.py", "snippet": "class LeftPadCollate:\n \"\"\"Data collator used for training.\n\n It is used when the data is left padded.\n \"\"\"\n\n def __init__(self, dim=1, pad_value=0, ignore_index=-1):\n self.dim = dim\n self.pad_value = pad_value\n self.ignore_index = ignore_index\n\n def __call__(self, batch): # noqa: D102\n input_ids = [list_to_tensor(item[\"chosen_input_ids\"])[: item[\"prompt_len\"]] for item in batch]\n # Use PyTorch's pad_sequence function\n # 
convert into left padding\n reversed_input_ids = [i.flip(dims=[0]) for i in input_ids]\n input_ids = pad_sequence(reversed_input_ids, batch_first=True, padding_value=self.pad_value).flip(dims=[1])\n attention_mask = input_ids.ne(self.pad_value).type(torch.int64)\n\n return {\n \"prompt_input_ids\": input_ids,\n \"prompt_attention_mask\": attention_mask,\n }" }, { "identifier": "policy_loss", "path": "sheeprlhf/loss/ppo.py", "snippet": "def policy_loss(\n log_probs: torch.Tensor,\n old_log_probs: torch.Tensor,\n advantages: torch.Tensor,\n clip_coeff: float,\n action_mask: Optional[torch.Tensor] = None,\n) -> torch.Tensor:\n \"\"\"Compute the policy loss for PPO.\"\"\"\n log_ratio = (log_probs - old_log_probs) * action_mask\n ratio = torch.exp(log_ratio)\n policy_loss_1 = -advantages * ratio\n policy_loss_2 = -advantages * torch.clamp(ratio, 1 - clip_coeff, 1 + clip_coeff)\n policy_loss = torch.max(policy_loss_1, policy_loss_2)\n if action_mask is not None:\n policy_loss = torch.sum(policy_loss * action_mask) / action_mask.sum()\n else:\n policy_loss = policy_loss.mean()\n return policy_loss" }, { "identifier": "value_loss", "path": "sheeprlhf/loss/ppo.py", "snippet": "def value_loss(\n values: torch.Tensor,\n old_values: torch.Tensor,\n returns: torch.Tensor,\n clip_coeff: float,\n action_mask: Optional[torch.Tensor] = None,\n) -> torch.Tensor:\n \"\"\"Compute the value loss for PPO.\"\"\"\n values_clipped = torch.clamp(values, old_values - clip_coeff, old_values + clip_coeff)\n value_loss1 = F.mse_loss(values, returns, reduction=\"none\")\n value_loss2 = F.mse_loss(values_clipped, returns, reduction=\"none\")\n value_loss = torch.max(value_loss1, value_loss2)\n if action_mask is not None:\n value_loss = torch.sum(value_loss * action_mask) / action_mask.sum()\n else:\n value_loss = value_loss.mean()\n return value_loss" }, { "identifier": "ActorModel", "path": "sheeprlhf/model/actor.py", "snippet": "class ActorModel(CasualModel):\n \"\"\"Actor model for PPO and DPO algorithms.\"\"\"\n\n def __init__(self, model_cfg: ModelConfig):\n super().__init__(model_cfg=model_cfg)\n\n def forward(self, **kwargs): # noqa: D102\n input_ids = kwargs[\"input_ids\"]\n if self.training and not self.model_cfg.use_attention_mask:\n kwargs.pop(\"attention_mask\")\n out = self.model(**kwargs)\n # Model predicts next token log probability here.\n actor_log_probs = F.log_softmax(out.logits[:, :-1, :], dim=-1)\n selected_actor_log_probs = actor_log_probs.gather(dim=-1, index=input_ids[:, 1:].unsqueeze(-1))\n return selected_actor_log_probs.squeeze(-1)" }, { "identifier": "DataConfig", "path": "sheeprlhf/structure/data.py", "snippet": "class DataConfig:\n \"\"\"The main class for processing data for the RLHF algorithm.\n\n Args:\n config_name: The name of the data configuration.\n dataset_name: The name of the dataset to load.\n root_dir: The directory where the processed data will be saved.\n tokenizer_name: The name of the tokenizer to use.\n max_length: The maximum length of the input tokens. Defaults to 512.\n max_prompt_length: The maximum length of the prompt tokens. Defaults to 512.\n num_samples: The number of samples to use. Defaults to None.\n ignore_index: The index to use for ignored tokens. Defaults to -1.\n remove_same_responses: Whether to remove samples with the same response. Defaults to True.\n remove_same_inputs: Whether to remove samples with the same input. Defaults to True.\n minimum_response_length: The minimum length of the response tokens. 
Defaults to 2.\n save_skipped_examples: Whether to save skipped examples. Defaults to False.\n validation_split: The validation split. Defaults to 0.1.\n reward_model_split: The reward model split. Defaults to 0.5.\n shuffle: Whether to shuffle the dataset. Defaults to True.\n seed: The random seed. Defaults to 42.\n split_names: The names of the splits. Defaults to (\"train\", \"val\", \"test\").\n \"\"\"\n\n _target_: str = \"sheeprlhf.data.DataProcessor\"\n config_name: str = MISSING\n dataset_name: str = MISSING\n root_dir: str = Path(\"./rlhf_data\")\n tokenizer_name: str = II(\"model.repo_name\")\n max_length: int = 256\n max_prompt_length: int = 128\n num_samples: Optional[int] = None\n ignore_index: int = -1\n remove_same_responses: bool = True\n remove_same_inputs: bool = True\n minimum_response_length: int = 5\n save_skipped_examples: bool = False\n shuffle: bool = True\n seed: int = II(\"seed\")\n validation_split: float = 0.1\n reward_model_split: float = 0.5\n split_names: Tuple[str] = (\"train\", \"test\")\n dry_run: bool = II(\"dry_run\")" }, { "identifier": "GenConfig", "path": "sheeprlhf/structure/generation.py", "snippet": "class GenConfig:\n \"\"\"The default configuration for the generator.\"\"\"\n\n # We cannot call this GenerationConfig because it will\n # conflict with transformers.GenerationConfig\n max_new_tokens: int = 128\n num_beams: int = 1\n do_sample: bool = True\n top_k: int = 50\n top_p: float = 1.0\n temperature: float = 1.0\n num_return_sequences: int = 1" }, { "identifier": "ModelConfig", "path": "sheeprlhf/structure/model.py", "snippet": "class ModelConfig:\n \"\"\"A generic configuration for models.\"\"\"\n\n config_name: str = MISSING\n repo_name: Optional[str] = None\n embedding_dim_name: Optional[str] = None\n transformer_name: Optional[str] = None\n casual: bool = True\n freeze_transformer: bool = False\n disable_dropout: bool = False\n library_cfg: HuggingFaceConfig = HuggingFaceConfig()\n finetune_mode: FINETUNE_MODE = FINETUNE_MODE.ALL\n lora_cfg: Optional[LORAConfig] = None\n use_attention_mask: bool = True\n fabric_empty_init: bool = True\n\n def __post_init__(self):\n if isinstance(self.finetune_mode, str):\n self.finetune_mode = FINETUNE_MODE(self.finetune_mode)" }, { "identifier": "PPOConfig", "path": "sheeprlhf/structure/task.py", "snippet": "class PPOConfig(TrainTaskConfig):\n \"\"\"Configuration class for PPO algorithm.\n\n Args:\n _name_: Name of the algorithm. Default is \"ppo\".\n rollout_size: Rollout size for PPO. For every training iteration this number of samples will\n be sampled from dataset and each will be used for generating response.\n rollout_mini_batch_size: Rollout mini batch size for PPO. This number is useful when the\n GPU memory is not sufficient for running all generation code with single batch.\n ppo_epochs: Number of ppo epochs to training. 
`ppo_step` function will be called `ppo_epochs` times\n normalize_rewards: Whether to whiten rewards\n normalize_advantages: Whether to whiten advantages\n adaptive_kl_coeff: Whether to use adaptively changing KL divergence coefficient\n clip_rewards: Whether to clip rewards\n reward_clip_value: Reward clipping value\n init_kl_coeff: KL divergence coefficient for comparing actor model with reference model.\n Higher value means more trust to reference model.\n target_kl_coeff: Target KL divergence coefficient\n clip_coeff: Clip coefficient for PPO loss\n vf_coeff: Value loss coefficient for PPO loss\n gae_gamma: Discount factor for GAE(Generalized Advantage Estimation)\n gae_lambd: Lambda for GAE(Generalized Advantage Estimation)\n sft_experiment_dir: Path to supervised finetuning experiment directory. Latest checkpoint will be loaded.\n rm_experiment_dir: Path to reward modelling experiment directory. Latest checkpoint will be loaded.\n sft_model_name: Name of the model to load from supervised finetuning experiment directory.\n If not provided, latest checkpoint will be loaded.\n rm_model_name: Name of the model to load from reward modelling experiment directory.\n If not provided, latest checkpoint will be loaded.\n actor_learning_rate: Learning rate for actor optimizer\n critic_learning_rate: Learning rate for critic optimizer\n init_critic_with_reward: Whether to initialize critic with reward model checkpoint or not.\n \"\"\"\n\n config_name: str = \"ppo\"\n rollout_size: int = 128\n rollout_mini_batch_size: int = 32\n ppo_epochs: int = 1\n normalize_rewards: bool = True\n normalize_advantages: bool = True\n adaptive_kl_coeff: bool = False\n clip_rewards: bool = True\n reward_clip_value: float = 5.0\n init_kl_coeff: float = 0.1\n target_kl_coeff: float = 0.1\n clip_coeff: float = 0.2\n vf_coeff: float = 0.1\n gae_gamma: float = 1.0\n gae_lambd: float = 0.95\n sft_experiment_dir: str = II(\"sft_experiment_dir\")\n rm_experiment_dir: str = II(\"rm_experiment_dir\")\n sft_model_name: Optional[str] = None\n rm_model_name: Optional[str] = None\n actor_learning_rate: float = 1e-6\n critic_learning_rate: float = 1e-6\n init_critic_with_reward: bool = True" }, { "identifier": "prepare_generation_config", "path": "sheeprlhf/utils/data.py", "snippet": "def prepare_generation_config(\n tokenizer: PreTrainedTokenizer, model_cfg: ModelConfig, gen_cfg: GenConfig, fabric: lightning.Fabric\n) -> Dict[str, Any]:\n \"\"\"Creates generation config for Hugginface models.\n\n In this function, we try to solve token problems for different models.\n \"\"\"\n gen_cfg_dict = asdict(gen_cfg)\n try:\n generation_config = GenerationConfig.from_pretrained(model_cfg.repo_name, **gen_cfg_dict)\n except EnvironmentError:\n # If the model does not have `generation_config.json` file, we create from scratch\n fabric.print(\"`generation_config.json` not found, creating `GenerationConfig` from scratch\")\n generation_config = GenerationConfig(**gen_cfg_dict)\n generation_config.pad_token_id = tokenizer.pad_token_id\n generation_config.eos_token_id = tokenizer.eos_token_id\n generation_config.bos_token_id = tokenizer.bos_token_id\n return generation_config" }, { "identifier": "validate_dataset", "path": "sheeprlhf/utils/data.py", "snippet": "def validate_dataset(fabric: lightning.Fabric, data_cfg: DataConfig) -> DataProcessor:\n \"\"\"Dataset validator.\n\n Validates the dataset for checking if it is required to re-create\n all preprocessing steps using tokenizers.\n \"\"\"\n 
os.environ.setdefault(\"TOKENIZERS_PARALLELISM\", \"true\")\n data_processor: DataProcessor = instantiate_from_config(data_cfg)\n full_path = data_processor.full_path\n create_dataset: bool = True\n if os.path.isdir(full_path):\n config_path = full_path / \"config.yaml\"\n if not config_path.exists():\n fabric.print(f\"Config file not found at {config_path} for the given dataset {data_cfg.config_name}\")\n fabric.print(\"Dataset will be recreated and previous files will be deleted.\")\n else:\n open_config = OmegaConf.load(config_path)\n loaded_dataset_cfg = DataConfig(**open_config)\n current_tokenizer = prepare_tokenizer(data_cfg.tokenizer_name)\n loaded_tokenizer = prepare_tokenizer(loaded_dataset_cfg.tokenizer_name)\n\n if type(current_tokenizer) != type(loaded_tokenizer):\n fabric.print(\"Tokenizer type changed.\")\n fabric.print(f\"Was {type(loaded_tokenizer)} now {type(current_tokenizer)}\")\n fabric.print(\"New dataset will be recreated and previous files will be deleted.\")\n create_dataset = True\n elif data_cfg != loaded_dataset_cfg:\n diffs = {}\n for k, v in asdict(data_cfg).items():\n if v != getattr(loaded_dataset_cfg, k):\n diffs[k] = (v, getattr(loaded_dataset_cfg, k))\n fabric.print(\"Dataset config changed.\")\n\n fabric.print(\"\\n\".join([f\"{k} was {v[0]} now {v[1]}\" for k, v in diffs.items()]))\n fabric.print(\"New dataset will be recreated and previous files will be deleted.\")\n create_dataset = True\n else:\n fabric.print(\"Dataset already exists. Skipping dataset creation.\")\n create_dataset = False\n if create_dataset:\n shutil.rmtree(full_path)\n # This disables FastTokenizer's parallelism for multiprocessing with dataloaders\n # TODO: check if can be avoided\n os.environ.setdefault(\"TOKENIZERS_PARALLELISM\", \"false\")\n data_processor.tokenizer = prepare_tokenizer(data_cfg.tokenizer_name)\n if create_dataset and fabric.is_global_zero:\n fabric.print(f\"Creating new dataset in {full_path}\")\n data_processor.process()\n OmegaConf.save(data_cfg, full_path / \"config.yaml\")\n fabric.barrier()\n\n return data_processor" }, { "identifier": "create_tensorboard_logger", "path": "sheeprlhf/utils/helper.py", "snippet": "def create_tensorboard_logger(\n fabric: Fabric, cfg: Dict[str, Any], override_log_level: bool = False\n) -> Tuple[Optional[TensorBoardLogger]]:\n \"\"\"Creates tensorboard logger.\n\n Set logger only on rank-0 but share the logger directory: since\n we don't know. 
what is happening during the `fabric.save()` method,\n at least we assure that all ranks save under the same named folder.\n As a plus, rank-0 sets the time uniquely for everyone.\n \"\"\"\n # Set logger only on rank-0 but share the logger directory: since we don't know\n # what is happening during the `fabric.save()` method, at least we assure that all\n # ranks save under the same named folder.\n # As a plus, rank-0 sets the time uniquely for everyone\n logger = None\n if fabric.is_global_zero:\n root_dir = os.path.join(\"logs\", \"runs\", cfg.root_dir)\n if override_log_level or cfg.metric.log_level > 0:\n logger = TensorBoardLogger(root_dir=root_dir, name=cfg.run_name)\n return logger" }, { "identifier": "get_log_dir", "path": "sheeprlhf/utils/helper.py", "snippet": "def get_log_dir(fabric: Fabric, root_dir: str, run_name: str, share: bool = True) -> str:\n \"\"\"Return and, if necessary, create the log directory.\n\n If there are more than one processes, the rank-0 process shares\n the directory to the others\n (if the `share` parameter is set to `True`).\n\n Args:\n fabric: the fabric instance.\n root_dir: the root directory of the experiment.\n run_name: the name of the experiment.\n share: whether or not to share the `log_dir` among processes.\n\n Returns:\n The log directory of the experiment.\n \"\"\"\n world_collective = TorchCollective()\n if fabric.world_size > 1 and share:\n world_collective.setup()\n world_collective.create_group()\n if fabric.is_global_zero:\n # If the logger was instantiated, then take the log_dir from it\n if len(fabric.loggers) > 0:\n log_dir = fabric.logger.log_dir\n else:\n # Otherwise the rank-zero process creates the log_dir\n save_dir = os.path.join(\"logs\", \"runs\", root_dir, run_name)\n fs = get_filesystem(root_dir)\n try:\n listdir_info = fs.listdir(save_dir)\n existing_versions = []\n for listing in listdir_info:\n d = listing[\"name\"]\n bn = os.path.basename(d)\n if _is_dir(fs, d) and bn.startswith(\"version_\"):\n dir_ver = bn.split(\"_\")[1].replace(\"/\", \"\")\n existing_versions.append(int(dir_ver))\n version = 0 if len(existing_versions) == 0 else max(existing_versions) + 1\n log_dir = os.path.join(save_dir, f\"version_{version}\")\n except OSError:\n warnings.warn(\"Missing logger folder: %s\", save_dir, stacklevel=2)\n log_dir = os.path.join(save_dir, f\"version_{0}\")\n\n os.makedirs(log_dir, exist_ok=True)\n if fabric.world_size > 1 and share:\n world_collective.broadcast_object_list([log_dir], src=0)\n else:\n data = [None]\n world_collective.broadcast_object_list(data, src=0)\n log_dir = data[0]\n return log_dir" }, { "identifier": "log_text", "path": "sheeprlhf/utils/helper.py", "snippet": "@rank_zero_only\ndef log_text(fabric: lightning.Fabric, text: str, name: str, step: int):\n \"\"\"Wrapper function to log text to tensorboard.\"\"\"\n if fabric.logger is not None:\n if isinstance(fabric.logger, lightning.fabric.loggers.tensorboard.TensorBoardLogger):\n fabric.logger.experiment.add_text(name, text, step)\n else:\n warnings.warn(f\"Logging text is not supported for {type(fabric.logger)}\", stacklevel=2)" }, { "identifier": "instantiate_from_config", "path": "sheeprlhf/utils/hydra.py", "snippet": "def instantiate_from_config(config: Any, *args, **kwargs):\n \"\"\"Wrapper function to instantiate objects from Hydra config.\"\"\"\n config_copy = deepcopy(config)\n if is_dataclass(config_copy):\n config_copy = asdict(config_copy)\n if isinstance(config_copy, dict) and \"config_name\" in config_copy:\n 
config_copy.pop(\"config_name\")\n return instantiate(config_copy, *args, **kwargs)" }, { "identifier": "PPOMetricManager", "path": "sheeprlhf/utils/metric.py", "snippet": "class PPOMetricManager(MetricManager): # noqa: D101\n train_actor_loss: LastValueMetric\n train_critic_loss: LastValueMetric\n train_reward_mean: LastValueMetric\n train_kl_div_mean: LastValueMetric\n info_lr: LastValueMetric\n info_ppo_time: LastValueMetric\n info_rollout_time: LastValueMetric\n info_kl_coeff: LastValueMetric\n info_actor_grad_norm: LastValueMetric\n info_critic_grad_norm: LastValueMetric\n debug_reward_scores: StatsMetric\n debug_advantages: StatsMetric\n debug_returns: StatsMetric" }, { "identifier": "compute_grad_norm", "path": "sheeprlhf/utils/model.py", "snippet": "def compute_grad_norm(model: torch.nn.Module) -> float: # noqa: D103\n total_norm = 0\n parameters = [p for p in model.parameters() if p.grad is not None and p.requires_grad]\n for p in parameters:\n param_norm = p.grad.detach().cpu().data.norm(2)\n total_norm += param_norm.item() ** 2\n total_norm = total_norm**0.5\n return total_norm" }, { "identifier": "prepare_optimizer_parameters", "path": "sheeprlhf/utils/model.py", "snippet": "def prepare_optimizer_parameters(model: torch.nn.Module, weight_decay: float) -> List[Dict[str, Any]]:\n \"\"\"Taken from https://github.com/karpathy/nanoGPT.\"\"\"\n param_dict = {pn: p for pn, p in model.named_parameters()}\n # filter out those that do not require grad\n param_dict = {pn: p for pn, p in param_dict.items() if p.requires_grad}\n # create optim groups. Any parameters that is 2D will be weight decayed, otherwise no.\n # i.e. all weight tensors in matmuls + embeddings decay, all biases and layernorms don't.\n decay_params = [p for n, p in param_dict.items() if p.dim() >= 2]\n nodecay_params = [p for n, p in param_dict.items() if p.dim() < 2]\n optim_groups = [\n {\"params\": decay_params, \"weight_decay\": weight_decay},\n {\"params\": nodecay_params, \"weight_decay\": 0.0},\n ]\n num_decay_params = sum(p.numel() for p in decay_params)\n num_nodecay_params = sum(p.numel() for p in nodecay_params)\n\n return optim_groups, num_decay_params, num_nodecay_params" }, { "identifier": "AdaptiveKLController", "path": "sheeprlhf/utils/ppo.py", "snippet": "class AdaptiveKLController:\n \"\"\"A class for controlling the KL divergence between the old and new policy in PPO.\n\n Parameters:\n init_kl_coeff : float\n The initial value for the KL coefficient.\n target_kl_coeff : float\n The target value for the KL coefficient.\n kl_horizon : float\n The number of steps over which to adjust the KL coefficient.\n clip_range : float\n The maximum amount by which to clip the proportional error.\n\n Attributes:\n value : float\n The current value of the KL coefficient.\n \"\"\"\n\n def __init__(self, init_kl_coeff: float, target_kl_coeff: float, kl_horizon: float, clip_range: float):\n self.value = init_kl_coeff\n self.target_kl_coeff = target_kl_coeff\n self.kl_horizon = kl_horizon\n self.clip_range = clip_range\n\n def update(self, current: int, n_steps: int):\n \"\"\"Update the value of the PPO object based on the current KL divergence and the number of steps taken.\n\n Args:\n current (float): The current KL divergence.\n n_steps (int): The number of steps taken.\n \"\"\"\n target = self.target_kl_coeff\n proportional_error = torch.clamp(current / target - 1, -self.clip_range, self.clip_range)\n mult = 1 + proportional_error * n_steps / self.kl_horizon\n self.value *= mult" }, { "identifier": 
"FixedKLController", "path": "sheeprlhf/utils/ppo.py", "snippet": "class FixedKLController:\n \"\"\"Dummy KL controller that does not update.\"\"\"\n\n def __init__(self, kl_coeff):\n self.value = kl_coeff\n\n def update(self, current, n_steps): # noqa: D102\n pass" }, { "identifier": "collect_rollout", "path": "sheeprlhf/utils/ppo.py", "snippet": "@torch.no_grad()\ndef collect_rollout(\n batch: Dict[str, torch.Tensor],\n agent: PPOAgent,\n kl_controller: Union[FixedKLController, AdaptiveKLController],\n generation_config: GenerationConfig,\n task_cfg: PPOConfig,\n tokenizer: PreTrainedTokenizer,\n fabric: lightning.Fabric,\n metrics: PPOMetricManager,\n) -> Dict[str, torch.Tensor]:\n \"\"\"Collects rollout data for PPO algorithm.\n\n Args:\n batch: The rollout batch data\n agent: The PPO agent.\n kl_controller: The KL controller for penalty.\n generation_config: The generation configuration.\n task_cfg: The PPO configuration used for training\n tokenizer: The model tokenizer.\n fabric: The fabric object.\n metrics: The metric manager for training.\n\n Returns:\n The rollout data.\n \"\"\"\n # We have the batch as dictionary let's create tensordict\n # so we can create dataloader with Fabric that transfers the data\n # to correct devices.\n batch_tdict = make_tensordict(batch)\n mini_batch_dataloader = DataLoader(\n batch_tdict,\n shuffle=False,\n batch_size=task_cfg.rollout_mini_batch_size,\n collate_fn=lambda x: x,\n num_workers=0,\n drop_last=False,\n )\n mini_batch_dataloader = fabric.setup_dataloaders(mini_batch_dataloader, use_distributed_sampler=False)\n rollout_dict_list = []\n\n # We use first generated token index - 1 to obtain correct logprobs.\n # Here we have batch of data fed into all models we have here is the input looks like:\n # Assuming padding tokens are `O` and input tokens are `I`\n # O O I I I\n # O O O I I (left padded batch)\n # O I I I I\n # After responses are generated we have new data assuming response tokens are `R`\n # O O I I I R R R O O O\n # O O O I I R R R R R O (padded from right side to longest text)\n # O I I I I R R R R R R\n start_token_idx = batch[\"prompt_input_ids\"].size(1) - 1\n for i, mini_batch in enumerate(mini_batch_dataloader):\n prompt_input_ids = mini_batch[\"prompt_input_ids\"]\n prompt_attention_mask = mini_batch[\"prompt_attention_mask\"]\n data = {\"input_ids\": prompt_input_ids, \"attention_mask\": prompt_attention_mask}\n\n input_ids = agent.actor.generate(**data, generation_config=generation_config)\n max_len_diff = generation_config.max_new_tokens - (input_ids.size(1) - prompt_input_ids.size(1))\n if max_len_diff > 0:\n input_ids = torch.nn.functional.pad(input_ids, (0, max_len_diff), value=tokenizer.pad_token_id)\n attention_masks = (input_ids != generation_config.pad_token_id).int()\n\n data = {\"input_ids\": input_ids, \"attention_mask\": attention_masks}\n # for logprobs we already omit the last tokens from computation\n actor_log_probs = agent.actor(**data)[:, start_token_idx:]\n ref_log_probs = agent.reference(**data)[:, start_token_idx:]\n # We need to also do the same for value and reward outputs\n values = agent.critic(**data)[:, start_token_idx:-1]\n reward_outputs = agent.reward(**data)[:, start_token_idx:-1]\n\n mini_batch_rollout = {\n \"input_ids\": input_ids, # (B, T) (B, (prompt + generated))\n \"attention_mask\": attention_masks, # (B, T) (B, (prompt + generated))\n \"actor_log_probs\": actor_log_probs, # (B, num_new_tokens)\n \"ref_log_probs\": ref_log_probs, # (B, num_new_tokens)\n \"values\": values, # (B, 
num_new_tokens)\n \"reward_outputs\": reward_outputs, # (B, num_new_tokens)\n }\n mini_batch_tdict = make_tensordict(mini_batch_rollout).cpu()\n rollout_dict_list.append(mini_batch_tdict)\n if i == 0:\n sample_from_rollout = tokenizer.decode(input_ids[0], skip_special_tokens=True)\n\n rollout = torch.cat(rollout_dict_list, 0)\n action_mask = rollout[\"attention_mask\"][:, start_token_idx:-1].int()\n reward_outputs = rollout.pop(\"reward_outputs\")\n # we already removed the last token from action mask\n # we dont need to remove it from last_token_idx\n last_token_idx = torch.argmax(torch.cumsum(action_mask, dim=1) * action_mask, dim=1, keepdim=True)\n reward_scores = torch.gather(reward_outputs, dim=-1, index=last_token_idx).squeeze(-1)\n kl_div = rollout[\"actor_log_probs\"] - rollout[\"ref_log_probs\"]\n\n mean_kl_div = masked_mean(kl_div, action_mask).mean()\n if task_cfg.clip_rewards:\n torch.clip_(reward_scores, -task_cfg.reward_clip_value, task_cfg.reward_clip_value)\n\n if task_cfg.normalize_rewards:\n # we normalize the reward but do not shift the mean\n # TODO: Does it really important to normalize the rewards?\n reward_scores = normalize(reward_scores, shift_mean=False)\n\n # Rewards are made of two components:\n # 1. Per token kl divergence\n # 2. Last token reward\n # Combination of these two component creates the reward signal\n rewards = kl_div.detach().clone() * -kl_controller.value\n rewards.scatter_add_(dim=1, index=last_token_idx, src=reward_scores.unsqueeze(-1))\n values = rollout[\"values\"]\n\n advantages, returns = compute_advantages_and_returns(\n rewards=rewards * action_mask,\n values=values * action_mask,\n gamma=task_cfg.gae_gamma,\n lambd=task_cfg.gae_lambd,\n )\n rollout[\"advantages\"] = advantages\n rollout[\"returns\"] = returns\n kl_controller.update(mean_kl_div, rollout[\"input_ids\"].size(0))\n metrics.train_kl_div_mean.update(mean_kl_div.item())\n metrics.train_reward_mean.update(reward_scores.mean().item())\n metrics.debug_reward_scores(reward_scores)\n metrics.debug_advantages(advantages)\n metrics.debug_returns(returns)\n\n return rollout, sample_from_rollout" }, { "identifier": "masked_normalize", "path": "sheeprlhf/utils/ppo.py", "snippet": "def masked_normalize( # noqa: D103\n tensor: torch.Tensor, mask: torch.Tensor, shift_mean: bool = True, dim: int = 1, eps: float = 1e-8\n) -> torch.Tensor:\n tensor = tensor * mask\n mean = masked_mean(tensor, mask, dim=dim)\n mean_centered = tensor - mean\n var = masked_mean(mean_centered**2, mask, dim=dim)\n normalized = mean_centered * var.clamp(min=eps).rsqrt()\n if not shift_mean:\n normalized += mean\n return normalized" }, { "identifier": "register_task", "path": "sheeprlhf/utils/registry.py", "snippet": "def register_task():\n \"\"\"Task registery decorator.\"\"\"\n\n def inner_decorator(fn):\n return _register_task(fn)\n\n return inner_decorator" } ]
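`collect_rollout` above calls `compute_advantages_and_returns(rewards, values, gamma, lambd)`, but the context does not include that helper's body. The sketch below is a generic GAE (Generalized Advantage Estimation) computation with the same signature, offered as an assumption of what such a helper typically does rather than as the repository's code.

```python
from typing import Tuple

import torch


@torch.no_grad()
def compute_advantages_and_returns(
    rewards: torch.Tensor,  # (B, T) per-token rewards
    values: torch.Tensor,   # (B, T) value estimates
    gamma: float = 1.0,
    lambd: float = 0.95,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Backward GAE recursion: A_t = delta_t + gamma * lambd * A_{t+1}; returns = A + V."""
    B, T = rewards.shape
    advantages = torch.zeros_like(rewards)
    next_advantage = torch.zeros(B, device=rewards.device)
    next_value = torch.zeros(B, device=rewards.device)  # no bootstrap beyond the last token
    for t in reversed(range(T)):
        delta = rewards[:, t] + gamma * next_value - values[:, t]
        next_advantage = delta + gamma * lambd * next_advantage
        advantages[:, t] = next_advantage
        next_value = values[:, t]
    returns = advantages + values
    return advantages, returns


if __name__ == "__main__":
    adv, ret = compute_advantages_and_returns(torch.randn(2, 5), torch.randn(2, 5))
    print(adv.shape, ret.shape)  # torch.Size([2, 5]) torch.Size([2, 5])
```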
import copy import time import torch from pathlib import Path from typing import Dict from lightning import Fabric from torch.utils.data import DataLoader from tqdm import tqdm from transformers import GenerationConfig, PreTrainedTokenizer from sheeprlhf.agent.ppo import PPOAgent from sheeprlhf.data.base import TextDataset from sheeprlhf.data.collate import LeftPadCollate from sheeprlhf.loss.ppo import policy_loss, value_loss from sheeprlhf.model.actor import ActorModel from sheeprlhf.structure.data import DataConfig from sheeprlhf.structure.generation import GenConfig from sheeprlhf.structure.model import ModelConfig from sheeprlhf.structure.task import PPOConfig from sheeprlhf.utils.data import prepare_generation_config, validate_dataset from sheeprlhf.utils.helper import create_tensorboard_logger, get_log_dir, log_text from sheeprlhf.utils.hydra import instantiate_from_config from sheeprlhf.utils.metric import PPOMetricManager from sheeprlhf.utils.model import compute_grad_norm, prepare_optimizer_parameters from sheeprlhf.utils.ppo import AdaptiveKLController, FixedKLController, collect_rollout, masked_normalize from sheeprlhf.utils.registry import register_task
11,059
agent.setup_finetuning() agent.actor = fabric.setup_module(agent.actor) agent.critic = fabric.setup_module(agent.critic) if not agent.share_critic_reward: agent.reward = fabric.setup_module(agent.reward) if not agent.share_actor_critic and not agent.lora_enabled: agent.reference = fabric.setup_module(agent.reference) # Setup Generation Configs generation_config = prepare_generation_config( tokenizer=tokenizer, model_cfg=model_cfg, gen_cfg=gen_cfg, fabric=fabric, ) eval_gen_cfg = copy.deepcopy(gen_cfg) eval_gen_cfg.do_sample = False eval_generation_config = prepare_generation_config( tokenizer=tokenizer, model_cfg=model_cfg, gen_cfg=eval_gen_cfg, fabric=fabric, ) # Setup Optimizer Scheduler fabric models actor_trainable_params, _, _ = prepare_optimizer_parameters(agent.actor, weight_decay=optim_cfg.weight_decay) actor_optimizer = instantiate_from_config( optim_cfg, params=actor_trainable_params, _convert_="partial", ) actor_optimizer = fabric.setup_optimizers(actor_optimizer) critic_trainable_params, _, _ = prepare_optimizer_parameters(agent.critic, weight_decay=optim_cfg.weight_decay) critic_optimizer = instantiate_from_config( optim_cfg, params=critic_trainable_params, _convert_="partial", ) critic_optimizer = fabric.setup_optimizers(critic_optimizer) if fabric.is_global_zero: gen_text, score = generate( agent=agent, tokenizer=tokenizer, generation_config=eval_generation_config, example_prompt=example_prompt, device=fabric.device, ) log_text(fabric, gen_text, "info/example_sample", step=0) fabric.log("info/example_last_reward", score, step=0) num_training_steps = 2 if cfg.dry_run else task_cfg.epochs * len(train_dataloader) # KL Controller if task_cfg.adaptive_kl_coeff: kl_controller = AdaptiveKLController( init_kl_coef=task_cfg.init_kl_coeff, target=task_cfg.target_kl_coeff, kl_horizon=num_training_steps ) else: kl_controller = FixedKLController(kl_coeff=task_cfg.init_kl_coeff) fabric.print("Model Checkpoint interval: ", task_cfg.save_interval, "steps") fabric.print("Model Evaluation interval: ", task_cfg.eval_interval, "steps") iterator = tqdm(range(num_training_steps), disable=not fabric.is_global_zero) data_iterator = iter(train_dataloader) agent.reward.eval() for k in iterator: # Setup counters and data if k % len(train_dataloader) == 0 or data_iterator is None: data_iterator = iter(train_dataloader) is_accumulating = (k) % task_cfg.gradient_accumulation_steps != 0 last_step = k == num_training_steps - 1 # Setup batch data batch = next(data_iterator) max_prompt_length = batch["prompt_input_ids"].shape[1] agent.actor.eval() agent.critic.eval() t0 = time.time() rollout, sample_output = collect_rollout( batch=batch, agent=agent, generation_config=generation_config, kl_controller=kl_controller, task_cfg=task_cfg, tokenizer=tokenizer, fabric=fabric, metrics=metrics, ) time_rollout = time.time() - t0 rollout_dataloader = DataLoader( rollout, batch_size=task_cfg.micro_batch_size, shuffle=True, collate_fn=lambda x: x ) rollout_dataloader = fabric.setup_dataloaders(rollout_dataloader, use_distributed_sampler=False) agent.actor.train() agent.critic.train() for _ in range(task_cfg.ppo_epochs): accumulator_counter = 0 for micro_batch in rollout_dataloader: is_accumulating = (accumulator_counter) % task_cfg.gradient_accumulation_steps != 0 generated_data = { "input_ids": micro_batch["input_ids"], "attention_mask": micro_batch["attention_mask"], } old_log_probs = micro_batch["actor_log_probs"] old_values = micro_batch["values"] advantages = micro_batch["advantages"] returns = 
micro_batch["returns"] start_token_idx = max_prompt_length - 1 action_mask = micro_batch["attention_mask"][:, start_token_idx:-1].int() if task_cfg.normalize_advantages: advantages = masked_normalize(advantages, action_mask) with fabric.no_backward_sync(agent.actor, enabled=is_accumulating): log_probs = agent.actor(**generated_data)[:, start_token_idx:] # (B, num_new_tokens)
@torch.no_grad() def generate( # noqa: D103 agent: PPOAgent, tokenizer: PreTrainedTokenizer, generation_config: GenerationConfig, example_prompt: Dict[str, torch.Tensor], device: torch.device, ): generated_input_ids = agent.actor.module.generate( input_ids=example_prompt["input_ids"].to(device), attention_mask=example_prompt["attention_mask"].to(device), generation_config=generation_config, use_cache=True, ) prompt_length = example_prompt["input_ids"].shape[1] generated_attention_mask = (generated_input_ids != generation_config.pad_token_id).int() generated_data = {"input_ids": generated_input_ids, "attention_mask": generated_attention_mask} reward = agent.reward(**generated_data)[:, prompt_length:] action_mask = (generated_input_ids != generation_config.pad_token_id).int()[:, prompt_length:] last_token_idx = torch.argmax(torch.cumsum(action_mask, dim=1) * action_mask, dim=1, keepdim=True) reward_score = torch.gather(reward, dim=-1, index=last_token_idx).squeeze(-1) return tokenizer.decode(generated_input_ids[0], skip_special_tokens=True), reward_score.item() @register_task() def main(fabric: Fabric, cfg: Dict): # noqa: D103 task_cfg = PPOConfig(**cfg.task) model_cfg = ModelConfig(**cfg.model) data_cfg = DataConfig(**cfg.data) gen_cfg = GenConfig(**cfg.generation) optim_cfg = cfg.optim fabric.seed_everything(cfg.seed + fabric.global_rank) # Create TensorBoardLogger. This will create the logger only on the # rank-0 process logger = create_tensorboard_logger(fabric, cfg, override_log_level=True) if logger and fabric.is_global_zero: fabric._loggers = [logger] fabric.logger.log_hyperparams(cfg) log_dir = get_log_dir(fabric, cfg.root_dir, cfg.run_name) experiment_dir = Path(log_dir).parent # Setup Metrics metrics = PPOMetricManager(log_interval=task_cfg.log_interval).to(fabric.device) # Setup Dataloaders data_processor = validate_dataset(fabric, data_cfg) dataset_path = Path(data_processor.full_path) tokenizer = data_processor.tokenizer collator = LeftPadCollate(pad_value=tokenizer.pad_token_id, ignore_index=data_cfg.ignore_index) train_dataset = TextDataset(dataframe_path=dataset_path / "finetune_train.pkl") train_dataloader = DataLoader( train_dataset, shuffle=True, batch_size=task_cfg.micro_batch_size, collate_fn=collator, num_workers=task_cfg.num_workers, ) train_dataloader = fabric.setup_dataloaders(train_dataloader) example_prompt = torch.load(dataset_path / "example_prompt.pt") # Setup Model with fabric.init_module(empty_init=model_cfg.fabric_empty_init): agent = PPOAgent(model_cfg=model_cfg, task_cfg=task_cfg) agent.load_checkpoint(device=fabric.device) agent.setup_finetuning() agent.actor = fabric.setup_module(agent.actor) agent.critic = fabric.setup_module(agent.critic) if not agent.share_critic_reward: agent.reward = fabric.setup_module(agent.reward) if not agent.share_actor_critic and not agent.lora_enabled: agent.reference = fabric.setup_module(agent.reference) # Setup Generation Configs generation_config = prepare_generation_config( tokenizer=tokenizer, model_cfg=model_cfg, gen_cfg=gen_cfg, fabric=fabric, ) eval_gen_cfg = copy.deepcopy(gen_cfg) eval_gen_cfg.do_sample = False eval_generation_config = prepare_generation_config( tokenizer=tokenizer, model_cfg=model_cfg, gen_cfg=eval_gen_cfg, fabric=fabric, ) # Setup Optimizer Scheduler fabric models actor_trainable_params, _, _ = prepare_optimizer_parameters(agent.actor, weight_decay=optim_cfg.weight_decay) actor_optimizer = instantiate_from_config( optim_cfg, params=actor_trainable_params, _convert_="partial", ) actor_optimizer = 
fabric.setup_optimizers(actor_optimizer) critic_trainable_params, _, _ = prepare_optimizer_parameters(agent.critic, weight_decay=optim_cfg.weight_decay) critic_optimizer = instantiate_from_config( optim_cfg, params=critic_trainable_params, _convert_="partial", ) critic_optimizer = fabric.setup_optimizers(critic_optimizer) if fabric.is_global_zero: gen_text, score = generate( agent=agent, tokenizer=tokenizer, generation_config=eval_generation_config, example_prompt=example_prompt, device=fabric.device, ) log_text(fabric, gen_text, "info/example_sample", step=0) fabric.log("info/example_last_reward", score, step=0) num_training_steps = 2 if cfg.dry_run else task_cfg.epochs * len(train_dataloader) # KL Controller if task_cfg.adaptive_kl_coeff: kl_controller = AdaptiveKLController( init_kl_coef=task_cfg.init_kl_coeff, target=task_cfg.target_kl_coeff, kl_horizon=num_training_steps ) else: kl_controller = FixedKLController(kl_coeff=task_cfg.init_kl_coeff) fabric.print("Model Checkpoint interval: ", task_cfg.save_interval, "steps") fabric.print("Model Evaluation interval: ", task_cfg.eval_interval, "steps") iterator = tqdm(range(num_training_steps), disable=not fabric.is_global_zero) data_iterator = iter(train_dataloader) agent.reward.eval() for k in iterator: # Setup counters and data if k % len(train_dataloader) == 0 or data_iterator is None: data_iterator = iter(train_dataloader) is_accumulating = (k) % task_cfg.gradient_accumulation_steps != 0 last_step = k == num_training_steps - 1 # Setup batch data batch = next(data_iterator) max_prompt_length = batch["prompt_input_ids"].shape[1] agent.actor.eval() agent.critic.eval() t0 = time.time() rollout, sample_output = collect_rollout( batch=batch, agent=agent, generation_config=generation_config, kl_controller=kl_controller, task_cfg=task_cfg, tokenizer=tokenizer, fabric=fabric, metrics=metrics, ) time_rollout = time.time() - t0 rollout_dataloader = DataLoader( rollout, batch_size=task_cfg.micro_batch_size, shuffle=True, collate_fn=lambda x: x ) rollout_dataloader = fabric.setup_dataloaders(rollout_dataloader, use_distributed_sampler=False) agent.actor.train() agent.critic.train() for _ in range(task_cfg.ppo_epochs): accumulator_counter = 0 for micro_batch in rollout_dataloader: is_accumulating = (accumulator_counter) % task_cfg.gradient_accumulation_steps != 0 generated_data = { "input_ids": micro_batch["input_ids"], "attention_mask": micro_batch["attention_mask"], } old_log_probs = micro_batch["actor_log_probs"] old_values = micro_batch["values"] advantages = micro_batch["advantages"] returns = micro_batch["returns"] start_token_idx = max_prompt_length - 1 action_mask = micro_batch["attention_mask"][:, start_token_idx:-1].int() if task_cfg.normalize_advantages: advantages = masked_normalize(advantages, action_mask) with fabric.no_backward_sync(agent.actor, enabled=is_accumulating): log_probs = agent.actor(**generated_data)[:, start_token_idx:] # (B, num_new_tokens)
p_loss = policy_loss(
3
2023-10-31 12:02:02+00:00
16k
cpacker/MemGPT
memgpt/cli/cli_config.py
[ { "identifier": "logger", "path": "memgpt/log.py", "snippet": "" }, { "identifier": "utils", "path": "memgpt/utils.py", "snippet": "DEBUG = False\r\nADJECTIVE_BANK = [\r\n \"beautiful\",\r\n \"gentle\",\r\n \"angry\",\r\n \"vivacious\",\r\n \"grumpy\",\r\n \"luxurious\",\r\n \"fierce\",\r\n \"delicate\",\r\n \"fluffy\",\r\n \"radiant\",\r\n \"elated\",\r\n \"magnificent\",\r\n \"sassy\",\r\n \"ecstatic\",\r\n \"lustrous\",\r\n \"gleaming\",\r\n \"sorrowful\",\r\n \"majestic\",\r\n \"proud\",\r\n \"dynamic\",\r\n \"energetic\",\r\n \"mysterious\",\r\n \"loyal\",\r\n \"brave\",\r\n \"decisive\",\r\n \"frosty\",\r\n \"cheerful\",\r\n \"adorable\",\r\n \"melancholy\",\r\n \"vibrant\",\r\n \"elegant\",\r\n \"gracious\",\r\n \"inquisitive\",\r\n \"opulent\",\r\n \"peaceful\",\r\n \"rebellious\",\r\n \"scintillating\",\r\n \"dazzling\",\r\n \"whimsical\",\r\n \"impeccable\",\r\n \"meticulous\",\r\n \"resilient\",\r\n \"charming\",\r\n \"vivacious\",\r\n \"creative\",\r\n \"intuitive\",\r\n \"compassionate\",\r\n \"innovative\",\r\n \"enthusiastic\",\r\n \"tremendous\",\r\n \"effervescent\",\r\n \"tenacious\",\r\n \"fearless\",\r\n \"sophisticated\",\r\n \"witty\",\r\n \"optimistic\",\r\n \"exquisite\",\r\n \"sincere\",\r\n \"generous\",\r\n \"kindhearted\",\r\n \"serene\",\r\n \"amiable\",\r\n \"adventurous\",\r\n \"bountiful\",\r\n \"courageous\",\r\n \"diligent\",\r\n \"exotic\",\r\n \"grateful\",\r\n \"harmonious\",\r\n \"imaginative\",\r\n \"jubilant\",\r\n \"keen\",\r\n \"luminous\",\r\n \"nurturing\",\r\n \"outgoing\",\r\n \"passionate\",\r\n \"quaint\",\r\n \"resourceful\",\r\n \"sturdy\",\r\n \"tactful\",\r\n \"unassuming\",\r\n \"versatile\",\r\n \"wondrous\",\r\n \"youthful\",\r\n \"zealous\",\r\n \"ardent\",\r\n \"benevolent\",\r\n \"capricious\",\r\n \"dedicated\",\r\n \"empathetic\",\r\n \"fabulous\",\r\n \"gregarious\",\r\n \"humble\",\r\n \"intriguing\",\r\n \"jovial\",\r\n \"kind\",\r\n \"lovable\",\r\n \"mindful\",\r\n \"noble\",\r\n \"original\",\r\n \"pleasant\",\r\n \"quixotic\",\r\n \"reliable\",\r\n \"spirited\",\r\n \"tranquil\",\r\n \"unique\",\r\n \"venerable\",\r\n \"warmhearted\",\r\n \"xenodochial\",\r\n \"yearning\",\r\n \"zesty\",\r\n \"amusing\",\r\n \"blissful\",\r\n \"calm\",\r\n \"daring\",\r\n \"enthusiastic\",\r\n \"faithful\",\r\n \"graceful\",\r\n \"honest\",\r\n \"incredible\",\r\n \"joyful\",\r\n \"kind\",\r\n \"lovely\",\r\n \"merry\",\r\n \"noble\",\r\n \"optimistic\",\r\n \"peaceful\",\r\n \"quirky\",\r\n \"respectful\",\r\n \"sweet\",\r\n \"trustworthy\",\r\n \"understanding\",\r\n \"vibrant\",\r\n \"witty\",\r\n \"xenial\",\r\n \"youthful\",\r\n \"zealous\",\r\n \"ambitious\",\r\n \"brilliant\",\r\n \"careful\",\r\n \"devoted\",\r\n \"energetic\",\r\n \"friendly\",\r\n \"glorious\",\r\n \"humorous\",\r\n \"intelligent\",\r\n \"jovial\",\r\n \"knowledgeable\",\r\n \"loyal\",\r\n \"modest\",\r\n \"nice\",\r\n \"obedient\",\r\n \"patient\",\r\n \"quiet\",\r\n \"resilient\",\r\n \"selfless\",\r\n \"tolerant\",\r\n \"unique\",\r\n \"versatile\",\r\n \"warm\",\r\n \"xerothermic\",\r\n \"yielding\",\r\n \"zestful\",\r\n \"amazing\",\r\n \"bold\",\r\n \"charming\",\r\n \"determined\",\r\n \"exciting\",\r\n \"funny\",\r\n \"happy\",\r\n \"imaginative\",\r\n \"jolly\",\r\n \"keen\",\r\n \"loving\",\r\n \"magnificent\",\r\n \"nifty\",\r\n \"outstanding\",\r\n \"polite\",\r\n \"quick\",\r\n \"reliable\",\r\n \"sincere\",\r\n \"thoughtful\",\r\n \"unusual\",\r\n \"valuable\",\r\n \"wonderful\",\r\n \"xenodochial\",\r\n \"zealful\",\r\n \"admirable\",\r\n 
\"bright\",\r\n \"clever\",\r\n \"dedicated\",\r\n \"extraordinary\",\r\n \"generous\",\r\n \"hardworking\",\r\n \"inspiring\",\r\n \"jubilant\",\r\n \"kindhearted\",\r\n \"lively\",\r\n \"miraculous\",\r\n \"neat\",\r\n \"openminded\",\r\n \"passionate\",\r\n \"remarkable\",\r\n \"stunning\",\r\n \"truthful\",\r\n \"upbeat\",\r\n \"vivacious\",\r\n \"welcoming\",\r\n \"yare\",\r\n \"zealous\",\r\n]\r\nNOUN_BANK = [\r\n \"lizard\",\r\n \"firefighter\",\r\n \"banana\",\r\n \"castle\",\r\n \"dolphin\",\r\n \"elephant\",\r\n \"forest\",\r\n \"giraffe\",\r\n \"harbor\",\r\n \"iceberg\",\r\n \"jewelry\",\r\n \"kangaroo\",\r\n \"library\",\r\n \"mountain\",\r\n \"notebook\",\r\n \"orchard\",\r\n \"penguin\",\r\n \"quilt\",\r\n \"rainbow\",\r\n \"squirrel\",\r\n \"teapot\",\r\n \"umbrella\",\r\n \"volcano\",\r\n \"waterfall\",\r\n \"xylophone\",\r\n \"yacht\",\r\n \"zebra\",\r\n \"apple\",\r\n \"butterfly\",\r\n \"caterpillar\",\r\n \"dragonfly\",\r\n \"elephant\",\r\n \"flamingo\",\r\n \"gorilla\",\r\n \"hippopotamus\",\r\n \"iguana\",\r\n \"jellyfish\",\r\n \"koala\",\r\n \"lemur\",\r\n \"mongoose\",\r\n \"nighthawk\",\r\n \"octopus\",\r\n \"panda\",\r\n \"quokka\",\r\n \"rhinoceros\",\r\n \"salamander\",\r\n \"tortoise\",\r\n \"unicorn\",\r\n \"vulture\",\r\n \"walrus\",\r\n \"xenopus\",\r\n \"yak\",\r\n \"zebu\",\r\n \"asteroid\",\r\n \"balloon\",\r\n \"compass\",\r\n \"dinosaur\",\r\n \"eagle\",\r\n \"firefly\",\r\n \"galaxy\",\r\n \"hedgehog\",\r\n \"island\",\r\n \"jaguar\",\r\n \"kettle\",\r\n \"lion\",\r\n \"mammoth\",\r\n \"nucleus\",\r\n \"owl\",\r\n \"pumpkin\",\r\n \"quasar\",\r\n \"reindeer\",\r\n \"snail\",\r\n \"tiger\",\r\n \"universe\",\r\n \"vampire\",\r\n \"wombat\",\r\n \"xerus\",\r\n \"yellowhammer\",\r\n \"zeppelin\",\r\n \"alligator\",\r\n \"buffalo\",\r\n \"cactus\",\r\n \"donkey\",\r\n \"emerald\",\r\n \"falcon\",\r\n \"gazelle\",\r\n \"hamster\",\r\n \"icicle\",\r\n \"jackal\",\r\n \"kitten\",\r\n \"leopard\",\r\n \"mushroom\",\r\n \"narwhal\",\r\n \"opossum\",\r\n \"peacock\",\r\n \"quail\",\r\n \"rabbit\",\r\n \"scorpion\",\r\n \"toucan\",\r\n \"urchin\",\r\n \"viper\",\r\n \"wolf\",\r\n \"xray\",\r\n \"yucca\",\r\n \"zebu\",\r\n \"acorn\",\r\n \"biscuit\",\r\n \"cupcake\",\r\n \"daisy\",\r\n \"eyeglasses\",\r\n \"frisbee\",\r\n \"goblin\",\r\n \"hamburger\",\r\n \"icicle\",\r\n \"jackfruit\",\r\n \"kaleidoscope\",\r\n \"lighthouse\",\r\n \"marshmallow\",\r\n \"nectarine\",\r\n \"obelisk\",\r\n \"pancake\",\r\n \"quicksand\",\r\n \"raspberry\",\r\n \"spinach\",\r\n \"truffle\",\r\n \"umbrella\",\r\n \"volleyball\",\r\n \"walnut\",\r\n \"xylophonist\",\r\n \"yogurt\",\r\n \"zucchini\",\r\n \"asterisk\",\r\n \"blackberry\",\r\n \"chimpanzee\",\r\n \"dumpling\",\r\n \"espresso\",\r\n \"fireplace\",\r\n \"gnome\",\r\n \"hedgehog\",\r\n \"illustration\",\r\n \"jackhammer\",\r\n \"kumquat\",\r\n \"lemongrass\",\r\n \"mandolin\",\r\n \"nugget\",\r\n \"ostrich\",\r\n \"parakeet\",\r\n \"quiche\",\r\n \"racquet\",\r\n \"seashell\",\r\n \"tadpole\",\r\n \"unicorn\",\r\n \"vaccination\",\r\n \"wolverine\",\r\n \"xenophobia\",\r\n \"yam\",\r\n \"zeppelin\",\r\n \"accordion\",\r\n \"broccoli\",\r\n \"carousel\",\r\n \"daffodil\",\r\n \"eggplant\",\r\n \"flamingo\",\r\n \"grapefruit\",\r\n \"harpsichord\",\r\n \"impression\",\r\n \"jackrabbit\",\r\n \"kitten\",\r\n \"llama\",\r\n \"mandarin\",\r\n \"nachos\",\r\n \"obelisk\",\r\n \"papaya\",\r\n \"quokka\",\r\n \"rooster\",\r\n \"sunflower\",\r\n \"turnip\",\r\n \"ukulele\",\r\n \"viper\",\r\n \"waffle\",\r\n \"xylograph\",\r\n 
\"yeti\",\r\n \"zephyr\",\r\n \"abacus\",\r\n \"blueberry\",\r\n \"crocodile\",\r\n \"dandelion\",\r\n \"echidna\",\r\n \"fig\",\r\n \"giraffe\",\r\n \"hamster\",\r\n \"iguana\",\r\n \"jackal\",\r\n \"kiwi\",\r\n \"lobster\",\r\n \"marmot\",\r\n \"noodle\",\r\n \"octopus\",\r\n \"platypus\",\r\n \"quail\",\r\n \"raccoon\",\r\n \"starfish\",\r\n \"tulip\",\r\n \"urchin\",\r\n \"vampire\",\r\n \"walrus\",\r\n \"xylophone\",\r\n \"yak\",\r\n \"zebra\",\r\n]\r\ndef get_tool_call_id() -> str:\r\ndef assistant_function_to_tool(assistant_message: dict) -> dict:\r\ndef is_optional_type(hint):\r\ndef enforce_types(func):\r\n def wrapper(*args, **kwargs):\r\ndef annotate_message_json_list_with_tool_calls(messages: List[dict]):\r\ndef version_less_than(version_a: str, version_b: str) -> bool:\r\ndef create_random_username() -> str:\r\ndef verify_first_message_correctness(\r\n response: ChatCompletionResponse, require_send_message: bool = True, require_monologue: bool = False\r\n) -> bool:\r\n def contains_special_characters(s):\r\ndef is_valid_url(url):\r\ndef suppress_stdout():\r\ndef open_folder_in_explorer(folder_path):\r\n def find_class(self, module, name):\r\ndef count_tokens(s: str, model: str = \"gpt-4\") -> int:\r\ndef printd(*args, **kwargs):\r\ndef united_diff(str1, str2):\r\ndef parse_formatted_time(formatted_time):\r\ndef datetime_to_timestamp(dt):\r\ndef timestamp_to_datetime(ts):\r\ndef get_local_time_military():\r\ndef get_local_time_timezone(timezone=\"America/Los_Angeles\"):\r\ndef get_local_time(timezone=None):\r\ndef format_datetime(dt):\r\ndef parse_json(string):\r\ndef validate_function_response(function_response_string: any, strict: bool = False, truncate: bool = True) -> str:\r\ndef list_agent_config_files(sort=\"last_modified\"):\r\ndef list_human_files():\r\ndef list_persona_files():\r\ndef get_human_text(name: str, enforce_limit=True):\r\ndef get_persona_text(name: str, enforce_limit=True):\r\ndef get_human_text(name: str):\r\ndef get_schema_diff(schema_a, schema_b):\r\ndef validate_date_format(date_str):\r\ndef extract_date_from_timestamp(timestamp):\r\nclass OpenAIBackcompatUnpickler(pickle.Unpickler):\r" }, { "identifier": "MemGPTConfig", "path": "memgpt/config.py", "snippet": "class MemGPTConfig:\n config_path: str = os.path.join(MEMGPT_DIR, \"config\")\n anon_clientid: str = None\n\n # preset\n preset: str = DEFAULT_PRESET\n\n # persona parameters\n persona: str = DEFAULT_PERSONA\n human: str = DEFAULT_HUMAN\n agent: str = None\n\n # model parameters\n default_llm_config: LLMConfig = field(default_factory=LLMConfig)\n\n # embedding parameters\n default_embedding_config: EmbeddingConfig = field(default_factory=EmbeddingConfig)\n\n # database configs: archival\n archival_storage_type: str = \"chroma\" # local, db\n archival_storage_path: str = os.path.join(MEMGPT_DIR, \"chroma\")\n archival_storage_uri: str = None # TODO: eventually allow external vector DB\n\n # database configs: recall\n recall_storage_type: str = \"sqlite\" # local, db\n recall_storage_path: str = MEMGPT_DIR\n recall_storage_uri: str = None # TODO: eventually allow external vector DB\n\n # database configs: metadata storage (sources, agents, data sources)\n metadata_storage_type: str = \"sqlite\"\n metadata_storage_path: str = MEMGPT_DIR\n metadata_storage_uri: str = None\n\n # database configs: agent state\n persistence_manager_type: str = None # in-memory, db\n persistence_manager_save_file: str = None # local file\n persistence_manager_uri: str = None # db URI\n\n # version (for backcompat)\n 
memgpt_version: str = None\n\n # user info\n policies_accepted: bool = False\n\n def __post_init__(self):\n # ensure types\n # self.embedding_chunk_size = int(self.embedding_chunk_size)\n # self.embedding_dim = int(self.embedding_dim)\n # self.context_window = int(self.context_window)\n pass\n\n @staticmethod\n def generate_uuid() -> str:\n return uuid.UUID(int=uuid.getnode()).hex\n\n @classmethod\n def load(cls) -> \"MemGPTConfig\":\n # avoid circular import\n from memgpt.migrate import config_is_compatible, VERSION_CUTOFF\n\n if not config_is_compatible(allow_empty=True):\n error_message = \" \".join(\n [\n f\"\\nYour current config file is incompatible with MemGPT versions later than {VERSION_CUTOFF}.\",\n f\"\\nTo use MemGPT, you must either downgrade your MemGPT version (<= {VERSION_CUTOFF}) or regenerate your config using `memgpt configure`, or `memgpt migrate` if you would like to migrate old agents.\",\n ]\n )\n raise ValueError(error_message)\n\n config = configparser.ConfigParser()\n\n # allow overriding with env variables\n if os.getenv(\"MEMGPT_CONFIG_PATH\"):\n config_path = os.getenv(\"MEMGPT_CONFIG_PATH\")\n else:\n config_path = MemGPTConfig.config_path\n\n # insure all configuration directories exist\n cls.create_config_dir()\n if os.path.exists(config_path):\n # read existing config\n config.read(config_path)\n\n # Handle extraction of nested LLMConfig and EmbeddingConfig\n llm_config_dict = {\n # Extract relevant LLM configuration from the config file\n \"model\": get_field(config, \"model\", \"model\"),\n \"model_endpoint\": get_field(config, \"model\", \"model_endpoint\"),\n \"model_endpoint_type\": get_field(config, \"model\", \"model_endpoint_type\"),\n \"model_wrapper\": get_field(config, \"model\", \"model_wrapper\"),\n \"context_window\": get_field(config, \"model\", \"context_window\"),\n }\n embedding_config_dict = {\n # Extract relevant Embedding configuration from the config file\n \"embedding_endpoint\": get_field(config, \"embedding\", \"embedding_endpoint\"),\n \"embedding_model\": get_field(config, \"embedding\", \"embedding_model\"),\n \"embedding_endpoint_type\": get_field(config, \"embedding\", \"embedding_endpoint_type\"),\n \"embedding_dim\": get_field(config, \"embedding\", \"embedding_dim\"),\n \"embedding_chunk_size\": get_field(config, \"embedding\", \"chunk_size\"),\n }\n # Correct the types that aren't strings\n if llm_config_dict[\"context_window\"] is not None:\n llm_config_dict[\"context_window\"] = int(llm_config_dict[\"context_window\"])\n if embedding_config_dict[\"embedding_dim\"] is not None:\n embedding_config_dict[\"embedding_dim\"] = int(embedding_config_dict[\"embedding_dim\"])\n if embedding_config_dict[\"embedding_chunk_size\"] is not None:\n embedding_config_dict[\"embedding_chunk_size\"] = int(embedding_config_dict[\"embedding_chunk_size\"])\n # Construct the inner properties\n llm_config = LLMConfig(**llm_config_dict)\n embedding_config = EmbeddingConfig(**embedding_config_dict)\n\n # Everything else\n config_dict = {\n # Two prepared configs\n \"default_llm_config\": llm_config,\n \"default_embedding_config\": embedding_config,\n # Agent related\n \"preset\": get_field(config, \"defaults\", \"preset\"),\n \"persona\": get_field(config, \"defaults\", \"persona\"),\n \"human\": get_field(config, \"defaults\", \"human\"),\n \"agent\": get_field(config, \"defaults\", \"agent\"),\n # Storage related\n \"archival_storage_type\": get_field(config, \"archival_storage\", \"type\"),\n \"archival_storage_path\": get_field(config, 
\"archival_storage\", \"path\"),\n \"archival_storage_uri\": get_field(config, \"archival_storage\", \"uri\"),\n \"recall_storage_type\": get_field(config, \"recall_storage\", \"type\"),\n \"recall_storage_path\": get_field(config, \"recall_storage\", \"path\"),\n \"recall_storage_uri\": get_field(config, \"recall_storage\", \"uri\"),\n \"metadata_storage_type\": get_field(config, \"metadata_storage\", \"type\"),\n \"metadata_storage_path\": get_field(config, \"metadata_storage\", \"path\"),\n \"metadata_storage_uri\": get_field(config, \"metadata_storage\", \"uri\"),\n # Misc\n \"anon_clientid\": get_field(config, \"client\", \"anon_clientid\"),\n \"config_path\": config_path,\n \"memgpt_version\": get_field(config, \"version\", \"memgpt_version\"),\n }\n\n # Don't include null values\n config_dict = {k: v for k, v in config_dict.items() if v is not None}\n\n return cls(**config_dict)\n\n # create new config\n anon_clientid = MemGPTConfig.generate_uuid()\n config = cls(anon_clientid=anon_clientid, config_path=config_path)\n config.create_config_dir() # create dirs\n config.save() # save updated config\n\n return config\n\n def save(self):\n import memgpt\n\n config = configparser.ConfigParser()\n\n # CLI defaults\n set_field(config, \"defaults\", \"preset\", self.preset)\n set_field(config, \"defaults\", \"persona\", self.persona)\n set_field(config, \"defaults\", \"human\", self.human)\n set_field(config, \"defaults\", \"agent\", self.agent)\n\n # model defaults\n set_field(config, \"model\", \"model\", self.default_llm_config.model)\n set_field(config, \"model\", \"model_endpoint\", self.default_llm_config.model_endpoint)\n set_field(config, \"model\", \"model_endpoint_type\", self.default_llm_config.model_endpoint_type)\n set_field(config, \"model\", \"model_wrapper\", self.default_llm_config.model_wrapper)\n set_field(config, \"model\", \"context_window\", str(self.default_llm_config.context_window))\n\n # embeddings\n set_field(config, \"embedding\", \"embedding_endpoint_type\", self.default_embedding_config.embedding_endpoint_type)\n set_field(config, \"embedding\", \"embedding_endpoint\", self.default_embedding_config.embedding_endpoint)\n set_field(config, \"embedding\", \"embedding_model\", self.default_embedding_config.embedding_model)\n set_field(config, \"embedding\", \"embedding_dim\", str(self.default_embedding_config.embedding_dim))\n set_field(config, \"embedding\", \"embedding_chunk_size\", str(self.default_embedding_config.embedding_chunk_size))\n\n # archival storage\n set_field(config, \"archival_storage\", \"type\", self.archival_storage_type)\n set_field(config, \"archival_storage\", \"path\", self.archival_storage_path)\n set_field(config, \"archival_storage\", \"uri\", self.archival_storage_uri)\n\n # recall storage\n set_field(config, \"recall_storage\", \"type\", self.recall_storage_type)\n set_field(config, \"recall_storage\", \"path\", self.recall_storage_path)\n set_field(config, \"recall_storage\", \"uri\", self.recall_storage_uri)\n\n # metadata storage\n set_field(config, \"metadata_storage\", \"type\", self.metadata_storage_type)\n set_field(config, \"metadata_storage\", \"path\", self.metadata_storage_path)\n set_field(config, \"metadata_storage\", \"uri\", self.metadata_storage_uri)\n\n # set version\n set_field(config, \"version\", \"memgpt_version\", memgpt.__version__)\n\n # client\n if not self.anon_clientid:\n self.anon_clientid = self.generate_uuid()\n set_field(config, \"client\", \"anon_clientid\", self.anon_clientid)\n\n # always make sure all 
directories are present\n self.create_config_dir()\n\n with open(self.config_path, \"w\") as f:\n config.write(f)\n logger.debug(f\"Saved Config: {self.config_path}\")\n\n @staticmethod\n def exists():\n # allow overriding with env variables\n if os.getenv(\"MEMGPT_CONFIG_PATH\"):\n config_path = os.getenv(\"MEMGPT_CONFIG_PATH\")\n else:\n config_path = MemGPTConfig.config_path\n\n assert not os.path.isdir(config_path), f\"Config path {config_path} cannot be set to a directory.\"\n return os.path.exists(config_path)\n\n @staticmethod\n def create_config_dir():\n if not os.path.exists(MEMGPT_DIR):\n os.makedirs(MEMGPT_DIR, exist_ok=True)\n\n folders = [\"personas\", \"humans\", \"archival\", \"agents\", \"functions\", \"system_prompts\", \"presets\", \"settings\"]\n\n for folder in folders:\n if not os.path.exists(os.path.join(MEMGPT_DIR, folder)):\n os.makedirs(os.path.join(MEMGPT_DIR, folder))" }, { "identifier": "MemGPTCredentials", "path": "memgpt/credentials.py", "snippet": "class MemGPTCredentials:\n # credentials for MemGPT\n credentials_path: str = os.path.join(MEMGPT_DIR, \"credentials\")\n\n # openai config\n openai_auth_type: str = \"bearer_token\"\n openai_key: str = None\n\n # azure config\n azure_auth_type: str = \"api_key\"\n azure_key: str = None\n azure_endpoint: str = None\n azure_version: str = None\n azure_deployment: str = None\n azure_embedding_deployment: str = None\n\n # custom llm API config\n openllm_auth_type: str = None\n openllm_key: str = None\n\n @classmethod\n def load(cls) -> \"MemGPTCredentials\":\n config = configparser.ConfigParser()\n\n # allow overriding with env variables\n if os.getenv(\"MEMGPT_CREDENTIALS_PATH\"):\n credentials_path = os.getenv(\"MEMGPT_CREDENTIALS_PATH\")\n else:\n credentials_path = MemGPTCredentials.credentials_path\n\n if os.path.exists(credentials_path):\n # read existing credentials\n config.read(credentials_path)\n config_dict = {\n # openai\n \"openai_auth_type\": get_field(config, \"openai\", \"auth_type\"),\n \"openai_key\": get_field(config, \"openai\", \"key\"),\n # azure\n \"azure_auth_type\": get_field(config, \"azure\", \"auth_type\"),\n \"azure_key\": get_field(config, \"azure\", \"key\"),\n \"azure_endpoint\": get_field(config, \"azure\", \"endpoint\"),\n \"azure_version\": get_field(config, \"azure\", \"version\"),\n \"azure_deployment\": get_field(config, \"azure\", \"deployment\"),\n \"azure_embedding_deployment\": get_field(config, \"azure\", \"embedding_deployment\"),\n # open llm\n \"openllm_auth_type\": get_field(config, \"openllm\", \"auth_type\"),\n \"openllm_key\": get_field(config, \"openllm\", \"key\"),\n # path\n \"credentials_path\": credentials_path,\n }\n config_dict = {k: v for k, v in config_dict.items() if v is not None}\n return cls(**config_dict)\n\n # create new config\n config = cls(credentials_path=credentials_path)\n config.save() # save updated config\n return config\n\n def save(self):\n import memgpt\n\n config = configparser.ConfigParser()\n # openai config\n set_field(config, \"openai\", \"auth_type\", self.openai_auth_type)\n set_field(config, \"openai\", \"key\", self.openai_key)\n\n # azure config\n set_field(config, \"azure\", \"auth_type\", self.azure_auth_type)\n set_field(config, \"azure\", \"key\", self.azure_key)\n set_field(config, \"azure\", \"endpoint\", self.azure_endpoint)\n set_field(config, \"azure\", \"version\", self.azure_version)\n set_field(config, \"azure\", \"deployment\", self.azure_deployment)\n set_field(config, \"azure\", \"embedding_deployment\", 
self.azure_embedding_deployment)\n\n # openai config\n set_field(config, \"openllm\", \"auth_type\", self.openllm_auth_type)\n set_field(config, \"openllm\", \"key\", self.openllm_key)\n\n if not os.path.exists(MEMGPT_DIR):\n os.makedirs(MEMGPT_DIR, exist_ok=True)\n with open(self.credentials_path, \"w\") as f:\n config.write(f)\n\n @staticmethod\n def exists():\n # allow overriding with env variables\n if os.getenv(\"MEMGPT_CREDENTIALS_PATH\"):\n credentials_path = os.getenv(\"MEMGPT_CREDENTIALS_PATH\")\n else:\n credentials_path = MemGPTCredentials.credentials_path\n\n assert not os.path.isdir(credentials_path), f\"Credentials path {credentials_path} cannot be set to a directory.\"\n return os.path.exists(credentials_path)" }, { "identifier": "SUPPORTED_AUTH_TYPES", "path": "memgpt/credentials.py", "snippet": "SUPPORTED_AUTH_TYPES = [\"bearer_token\", \"api_key\"]" }, { "identifier": "MEMGPT_DIR", "path": "memgpt/constants.py", "snippet": "MEMGPT_DIR = os.path.join(os.path.expanduser(\"~\"), \".memgpt\")\r" }, { "identifier": "LLM_MAX_TOKENS", "path": "memgpt/constants.py", "snippet": "LLM_MAX_TOKENS = {\r\n \"DEFAULT\": 8192,\r\n ## OpenAI models: https://platform.openai.com/docs/models/overview\r\n # gpt-4\r\n \"gpt-4-1106-preview\": 128000,\r\n \"gpt-4\": 8192,\r\n \"gpt-4-32k\": 32768,\r\n \"gpt-4-0613\": 8192,\r\n \"gpt-4-32k-0613\": 32768,\r\n \"gpt-4-0314\": 8192, # legacy\r\n \"gpt-4-32k-0314\": 32768, # legacy\r\n # gpt-3.5\r\n \"gpt-3.5-turbo-1106\": 16385,\r\n \"gpt-3.5-turbo\": 4096,\r\n \"gpt-3.5-turbo-16k\": 16385,\r\n \"gpt-3.5-turbo-0613\": 4096, # legacy\r\n \"gpt-3.5-turbo-16k-0613\": 16385, # legacy\r\n \"gpt-3.5-turbo-0301\": 4096, # legacy\r\n}\r" }, { "identifier": "DEFAULT_ENDPOINTS", "path": "memgpt/local_llm/constants.py", "snippet": "DEFAULT_ENDPOINTS = {\n \"koboldcpp\": \"http://localhost:5001\",\n \"llamacpp\": \"http://localhost:8080\",\n \"lmstudio\": \"http://localhost:1234\",\n \"lmstudio-legacy\": \"http://localhost:1234\",\n \"ollama\": \"http://localhost:11434\",\n \"webui-legacy\": \"http://localhost:5000\",\n \"webui\": \"http://localhost:5000\",\n \"vllm\": \"http://localhost:8000\",\n}" }, { "identifier": "DEFAULT_OLLAMA_MODEL", "path": "memgpt/local_llm/constants.py", "snippet": "DEFAULT_OLLAMA_MODEL = \"dolphin2.2-mistral:7b-q6_K\"" }, { "identifier": "DEFAULT_WRAPPER_NAME", "path": "memgpt/local_llm/constants.py", "snippet": "DEFAULT_WRAPPER_NAME = \"chatml\"" }, { "identifier": "get_available_wrappers", "path": "memgpt/local_llm/utils.py", "snippet": "def get_available_wrappers() -> dict:\n return {\n # New chatml-based wrappers\n \"chatml\": chatml.ChatMLInnerMonologueWrapper(),\n \"chatml-grammar\": chatml.ChatMLInnerMonologueWrapper(),\n \"chatml-noforce\": chatml.ChatMLOuterInnerMonologueWrapper(),\n \"chatml-noforce-grammar\": chatml.ChatMLOuterInnerMonologueWrapper(),\n # \"chatml-noforce-sysm\": chatml.ChatMLOuterInnerMonologueWrapper(use_system_role_in_user=True),\n \"chatml-noforce-roles\": chatml.ChatMLOuterInnerMonologueWrapper(use_system_role_in_user=True, allow_function_role=True),\n \"chatml-noforce-roles-grammar\": chatml.ChatMLOuterInnerMonologueWrapper(use_system_role_in_user=True, allow_function_role=True),\n # With extra hints\n \"chatml-hints\": chatml.ChatMLInnerMonologueWrapper(assistant_prefix_hint=True),\n \"chatml-hints-grammar\": chatml.ChatMLInnerMonologueWrapper(assistant_prefix_hint=True),\n \"chatml-noforce-hints\": chatml.ChatMLOuterInnerMonologueWrapper(assistant_prefix_hint=True),\n 
\"chatml-noforce-hints-grammar\": chatml.ChatMLOuterInnerMonologueWrapper(assistant_prefix_hint=True),\n # Legacy wrappers\n \"airoboros-l2-70b-2.1\": airoboros.Airoboros21InnerMonologueWrapper(),\n \"airoboros-l2-70b-2.1-grammar\": airoboros.Airoboros21InnerMonologueWrapper(assistant_prefix_extra=None),\n \"dolphin-2.1-mistral-7b\": dolphin.Dolphin21MistralWrapper(),\n \"dolphin-2.1-mistral-7b-grammar\": dolphin.Dolphin21MistralWrapper(include_opening_brace_in_prefix=False),\n \"zephyr-7B\": zephyr.ZephyrMistralInnerMonologueWrapper(),\n \"zephyr-7B-grammar\": zephyr.ZephyrMistralInnerMonologueWrapper(include_opening_brace_in_prefix=False),\n }" }, { "identifier": "openai_get_model_list", "path": "memgpt/llm_api_tools.py", "snippet": "def openai_get_model_list(url: str, api_key: Union[str, None]) -> dict:\r\n \"\"\"https://platform.openai.com/docs/api-reference/models/list\"\"\"\r\n from memgpt.utils import printd\r\n\r\n url = smart_urljoin(url, \"models\")\r\n\r\n headers = {\"Content-Type\": \"application/json\"}\r\n if api_key is not None:\r\n headers[\"Authorization\"] = f\"Bearer {api_key}\"\r\n\r\n printd(f\"Sending request to {url}\")\r\n try:\r\n response = requests.get(url, headers=headers)\r\n response.raise_for_status() # Raises HTTPError for 4XX/5XX status\r\n response = response.json() # convert to dict from string\r\n printd(f\"response = {response}\")\r\n return response\r\n except requests.exceptions.HTTPError as http_err:\r\n # Handle HTTP errors (e.g., response 4XX, 5XX)\r\n try:\r\n response = response.json()\r\n except:\r\n pass\r\n printd(f\"Got HTTPError, exception={http_err}, response={response}\")\r\n raise http_err\r\n except requests.exceptions.RequestException as req_err:\r\n # Handle other requests-related errors (e.g., connection error)\r\n try:\r\n response = response.json()\r\n except:\r\n pass\r\n printd(f\"Got RequestException, exception={req_err}, response={response}\")\r\n raise req_err\r\n except Exception as e:\r\n # Handle other potential errors\r\n try:\r\n response = response.json()\r\n except:\r\n pass\r\n printd(f\"Got unknown Exception, exception={e}, response={response}\")\r\n raise e\r" }, { "identifier": "azure_openai_get_model_list", "path": "memgpt/llm_api_tools.py", "snippet": "def azure_openai_get_model_list(url: str, api_key: Union[str, None], api_version: str) -> dict:\r\n \"\"\"https://learn.microsoft.com/en-us/rest/api/azureopenai/models/list?view=rest-azureopenai-2023-05-15&tabs=HTTP\"\"\"\r\n from memgpt.utils import printd\r\n\r\n # https://xxx.openai.azure.com/openai/models?api-version=xxx\r\n url = smart_urljoin(url, \"openai\")\r\n url = smart_urljoin(url, f\"models?api-version={api_version}\")\r\n\r\n headers = {\"Content-Type\": \"application/json\"}\r\n if api_key is not None:\r\n headers[\"api-key\"] = f\"{api_key}\"\r\n\r\n printd(f\"Sending request to {url}\")\r\n try:\r\n response = requests.get(url, headers=headers)\r\n response.raise_for_status() # Raises HTTPError for 4XX/5XX status\r\n response = response.json() # convert to dict from string\r\n printd(f\"response = {response}\")\r\n return response\r\n except requests.exceptions.HTTPError as http_err:\r\n # Handle HTTP errors (e.g., response 4XX, 5XX)\r\n try:\r\n response = response.json()\r\n except:\r\n pass\r\n printd(f\"Got HTTPError, exception={http_err}, response={response}\")\r\n raise http_err\r\n except requests.exceptions.RequestException as req_err:\r\n # Handle other requests-related errors (e.g., connection error)\r\n try:\r\n response = 
response.json()\r\n except:\r\n pass\r\n printd(f\"Got RequestException, exception={req_err}, response={response}\")\r\n raise req_err\r\n except Exception as e:\r\n # Handle other potential errors\r\n try:\r\n response = response.json()\r\n except:\r\n pass\r\n printd(f\"Got unknown Exception, exception={e}, response={response}\")\r\n raise e\r" }, { "identifier": "smart_urljoin", "path": "memgpt/llm_api_tools.py", "snippet": "def smart_urljoin(base_url, relative_url):\r\n \"\"\"urljoin is stupid and wants a trailing / at the end of the endpoint address, or it will chop the suffix off\"\"\"\r\n if not base_url.endswith(\"/\"):\r\n base_url += \"/\"\r\n return urllib.parse.urljoin(base_url, relative_url)\r" }, { "identifier": "shorten_key_middle", "path": "memgpt/server/utils.py", "snippet": "def shorten_key_middle(key_string, chars_each_side=3):\n \"\"\"\n Shortens a key string by showing a specified number of characters on each side and adding an ellipsis in the middle.\n\n Args:\n key_string (str): The key string to be shortened.\n chars_each_side (int): The number of characters to show on each side of the ellipsis.\n\n Returns:\n str: The shortened key string with an ellipsis in the middle.\n \"\"\"\n if not key_string:\n return key_string\n key_length = len(key_string)\n if key_length <= 2 * chars_each_side:\n return \"...\" # Return ellipsis if the key is too short\n else:\n return key_string[:chars_each_side] + \"...\" + key_string[-chars_each_side:]" }, { "identifier": "User", "path": "memgpt/data_types.py", "snippet": "class User:\n\n \"\"\"Defines user and default configurations\"\"\"\n\n # TODO: make sure to encrypt/decrypt keys before storing in DB\n\n def __init__(\n self,\n # name: str,\n id: Optional[uuid.UUID] = None,\n default_preset=DEFAULT_PRESET,\n default_persona=DEFAULT_PERSONA,\n default_human=DEFAULT_HUMAN,\n default_agent=None,\n # other\n policies_accepted=False,\n ):\n if id is None:\n self.id = uuid.uuid4()\n else:\n self.id = id\n assert isinstance(self.id, uuid.UUID), f\"UUID {self.id} must be a UUID type\"\n\n self.default_preset = default_preset\n self.default_persona = default_persona\n self.default_human = default_human\n self.default_agent = default_agent\n\n # misc\n self.policies_accepted = policies_accepted" }, { "identifier": "LLMConfig", "path": "memgpt/data_types.py", "snippet": "class LLMConfig:\n def __init__(\n self,\n model: Optional[str] = \"gpt-4\",\n model_endpoint_type: Optional[str] = \"openai\",\n model_endpoint: Optional[str] = \"https://api.openai.com/v1\",\n model_wrapper: Optional[str] = None,\n context_window: Optional[int] = None,\n ):\n self.model = model\n self.model_endpoint_type = model_endpoint_type\n self.model_endpoint = model_endpoint\n self.model_wrapper = model_wrapper\n self.context_window = context_window\n\n if context_window is None:\n self.context_window = LLM_MAX_TOKENS[self.model] if self.model in LLM_MAX_TOKENS else LLM_MAX_TOKENS[\"DEFAULT\"]\n else:\n self.context_window = context_window" }, { "identifier": "EmbeddingConfig", "path": "memgpt/data_types.py", "snippet": "class EmbeddingConfig:\n def __init__(\n self,\n embedding_endpoint_type: Optional[str] = \"openai\",\n embedding_endpoint: Optional[str] = \"https://api.openai.com/v1\",\n embedding_model: Optional[str] = \"text-embedding-ada-002\",\n embedding_dim: Optional[int] = 1536,\n embedding_chunk_size: Optional[int] = 300,\n ):\n self.embedding_endpoint_type = embedding_endpoint_type\n self.embedding_endpoint = embedding_endpoint\n self.embedding_model = 
embedding_model\n self.embedding_dim = embedding_dim\n self.embedding_chunk_size = embedding_chunk_size" }, { "identifier": "MetadataStore", "path": "memgpt/metadata.py", "snippet": "class MetadataStore:\n def __init__(self, config: MemGPTConfig):\n # TODO: get DB URI or path\n if config.metadata_storage_type == \"postgres\":\n self.uri = config.metadata_storage_uri\n elif config.metadata_storage_type == \"sqlite\":\n path = os.path.join(config.metadata_storage_path, \"sqlite.db\")\n self.uri = f\"sqlite:///{path}\"\n else:\n raise ValueError(f\"Invalid metadata storage type: {config.metadata_storage_type}\")\n\n # TODO: check to see if table(s) need to be greated or not\n\n self.engine = create_engine(self.uri)\n Base.metadata.create_all(\n self.engine, tables=[UserModel.__table__, AgentModel.__table__, SourceModel.__table__, AgentSourceMappingModel.__table__]\n )\n session_maker = sessionmaker(bind=self.engine)\n self.session = session_maker()\n\n @enforce_types\n def create_agent(self, agent: AgentState):\n # insert into agent table\n # make sure agent.name does not already exist for user user_id\n if self.session.query(AgentModel).filter(AgentModel.name == agent.name).filter(AgentModel.user_id == agent.user_id).count() > 0:\n raise ValueError(f\"Agent with name {agent.name} already exists\")\n self.session.add(AgentModel(**vars(agent)))\n self.session.commit()\n\n @enforce_types\n def create_source(self, source: Source):\n # make sure source.name does not already exist for user\n if (\n self.session.query(SourceModel).filter(SourceModel.name == source.name).filter(SourceModel.user_id == source.user_id).count()\n > 0\n ):\n raise ValueError(f\"Source with name {source.name} already exists\")\n self.session.add(SourceModel(**vars(source)))\n self.session.commit()\n\n @enforce_types\n def create_user(self, user: User):\n if self.session.query(UserModel).filter(UserModel.id == user.id).count() > 0:\n raise ValueError(f\"User with id {user.id} already exists\")\n self.session.add(UserModel(**vars(user)))\n self.session.commit()\n\n @enforce_types\n def update_agent(self, agent: AgentState):\n self.session.query(AgentModel).filter(AgentModel.id == agent.id).update(vars(agent))\n self.session.commit()\n\n @enforce_types\n def update_user(self, user: User):\n self.session.query(UserModel).filter(UserModel.id == user.id).update(vars(user))\n self.session.commit()\n\n @enforce_types\n def update_source(self, source: Source):\n self.session.query(SourceModel).filter(SourceModel.id == source.id).update(vars(source))\n self.session.commit()\n\n @enforce_types\n def delete_agent(self, agent_id: uuid.UUID):\n self.session.query(AgentModel).filter(AgentModel.id == agent_id).delete()\n self.session.commit()\n\n @enforce_types\n def delete_source(self, source_id: uuid.UUID):\n # delete from sources table\n self.session.query(SourceModel).filter(SourceModel.id == source_id).delete()\n\n # delete any mappings\n self.session.query(AgentSourceMappingModel).filter(AgentSourceMappingModel.source_id == source_id).delete()\n\n self.session.commit()\n\n @enforce_types\n def delete_user(self, user_id: uuid.UUID):\n # delete from users table\n self.session.query(UserModel).filter(UserModel.id == user_id).delete()\n\n # delete associated agents\n self.session.query(AgentModel).filter(AgentModel.user_id == user_id).delete()\n\n # delete associated sources\n self.session.query(SourceModel).filter(SourceModel.user_id == user_id).delete()\n\n # delete associated mappings\n 
self.session.query(AgentSourceMappingModel).filter(AgentSourceMappingModel.user_id == user_id).delete()\n\n self.session.commit()\n\n @enforce_types\n def list_agents(self, user_id: uuid.UUID) -> List[AgentState]:\n results = self.session.query(AgentModel).filter(AgentModel.user_id == user_id).all()\n return [r.to_record() for r in results]\n\n @enforce_types\n def list_sources(self, user_id: uuid.UUID) -> List[Source]:\n results = self.session.query(SourceModel).filter(SourceModel.user_id == user_id).all()\n return [r.to_record() for r in results]\n\n @enforce_types\n def get_agent(\n self, agent_id: Optional[uuid.UUID] = None, agent_name: Optional[str] = None, user_id: Optional[uuid.UUID] = None\n ) -> Optional[AgentState]:\n if agent_id:\n results = self.session.query(AgentModel).filter(AgentModel.id == agent_id).all()\n else:\n assert agent_name is not None and user_id is not None, \"Must provide either agent_id or agent_name\"\n results = self.session.query(AgentModel).filter(AgentModel.name == agent_name).filter(AgentModel.user_id == user_id).all()\n\n if len(results) == 0:\n return None\n assert len(results) == 1, f\"Expected 1 result, got {len(results)}\" # should only be one result\n return results[0].to_record()\n\n @enforce_types\n def get_user(self, user_id: uuid.UUID) -> Optional[User]:\n results = self.session.query(UserModel).filter(UserModel.id == user_id).all()\n if len(results) == 0:\n return None\n assert len(results) == 1, f\"Expected 1 result, got {len(results)}\"\n return results[0].to_record()\n\n @enforce_types\n def get_source(\n self, source_id: Optional[uuid.UUID] = None, user_id: Optional[uuid.UUID] = None, source_name: Optional[str] = None\n ) -> Optional[Source]:\n if source_id:\n results = self.session.query(SourceModel).filter(SourceModel.id == source_id).all()\n else:\n assert user_id is not None and source_name is not None\n results = self.session.query(SourceModel).filter(SourceModel.name == source_name).filter(SourceModel.user_id == user_id).all()\n if len(results) == 0:\n return None\n assert len(results) == 1, f\"Expected 1 result, got {len(results)}\"\n return results[0].to_record()\n\n # agent source metadata\n @enforce_types\n def attach_source(self, user_id: uuid.UUID, agent_id: uuid.UUID, source_id: uuid.UUID):\n self.session.add(AgentSourceMappingModel(user_id=user_id, agent_id=agent_id, source_id=source_id))\n self.session.commit()\n\n @enforce_types\n def list_attached_sources(self, agent_id: uuid.UUID) -> List[Column]:\n results = self.session.query(AgentSourceMappingModel).filter(AgentSourceMappingModel.agent_id == agent_id).all()\n return [r.source_id for r in results]\n\n @enforce_types\n def list_attached_agents(self, source_id: uuid.UUID):\n results = self.session.query(AgentSourceMappingModel).filter(AgentSourceMappingModel.source_id == source_id).all()\n return [r.agent_id for r in results]\n\n @enforce_types\n def detach_source(self, agent_id: uuid.UUID, source_id: uuid.UUID):\n self.session.query(AgentSourceMappingModel).filter(\n AgentSourceMappingModel.agent_id == agent_id, AgentSourceMappingModel.source_id == source_id\n ).delete()\n self.session.commit()" }, { "identifier": "StorageConnector", "path": "memgpt/agent_store/storage.py", "snippet": "class StorageConnector:\n \"\"\"Defines a DB connection that is user-specific to access data: Documents, Passages, Archival/Recall Memory\"\"\"\n\n def __init__(self, table_type: TableType, config: MemGPTConfig, user_id, agent_id=None):\n self.user_id = user_id\n self.agent_id = agent_id\n 
self.table_type = table_type\n\n # get object type\n if table_type == TableType.ARCHIVAL_MEMORY:\n self.type = Passage\n self.table_name = ARCHIVAL_TABLE_NAME\n elif table_type == TableType.RECALL_MEMORY:\n self.type = Message\n self.table_name = RECALL_TABLE_NAME\n elif table_type == TableType.DOCUMENTS:\n self.type = Document\n self.table_name == DOCUMENT_TABLE_NAME\n elif table_type == TableType.PASSAGES:\n self.type = Passage\n self.table_name = PASSAGE_TABLE_NAME\n else:\n raise ValueError(f\"Table type {table_type} not implemented\")\n printd(f\"Using table name {self.table_name}\")\n\n # setup base filters for agent-specific tables\n if self.table_type == TableType.ARCHIVAL_MEMORY or self.table_type == TableType.RECALL_MEMORY:\n # agent-specific table\n assert agent_id is not None, \"Agent ID must be provided for agent-specific tables\"\n self.filters = {\"user_id\": self.user_id, \"agent_id\": self.agent_id}\n elif self.table_type == TableType.PASSAGES or self.table_type == TableType.DOCUMENTS:\n # setup base filters for user-specific tables\n assert agent_id is None, \"Agent ID must not be provided for user-specific tables\"\n self.filters = {\"user_id\": self.user_id}\n else:\n raise ValueError(f\"Table type {table_type} not implemented\")\n\n def get_filters(self, filters: Optional[Dict] = {}):\n # get all filters for query\n if filters is not None:\n filter_conditions = {**self.filters, **filters}\n else:\n filter_conditions = self.filters\n return filter_conditions\n\n @staticmethod\n def get_storage_connector(table_type: TableType, config: MemGPTConfig, user_id, agent_id=None):\n if table_type == TableType.ARCHIVAL_MEMORY or table_type == TableType.PASSAGES:\n storage_type = config.archival_storage_type\n elif table_type == TableType.RECALL_MEMORY:\n storage_type = config.recall_storage_type\n else:\n raise ValueError(f\"Table type {table_type} not implemented\")\n\n if storage_type == \"postgres\":\n from memgpt.agent_store.db import PostgresStorageConnector\n\n return PostgresStorageConnector(table_type, config, user_id, agent_id)\n elif storage_type == \"chroma\":\n from memgpt.agent_store.chroma import ChromaStorageConnector\n\n return ChromaStorageConnector(table_type, config, user_id, agent_id)\n\n # TODO: add back\n # elif storage_type == \"lancedb\":\n # from memgpt.agent_store.db import LanceDBConnector\n\n # return LanceDBConnector(agent_config=agent_config, table_type=table_type)\n\n elif storage_type == \"sqlite\":\n from memgpt.agent_store.db import SQLLiteStorageConnector\n\n return SQLLiteStorageConnector(table_type, config, user_id, agent_id)\n\n else:\n raise NotImplementedError(f\"Storage type {storage_type} not implemented\")\n\n @staticmethod\n def get_archival_storage_connector(user_id, agent_id):\n config = MemGPTConfig.load()\n return StorageConnector.get_storage_connector(TableType.ARCHIVAL_MEMORY, config, user_id, agent_id)\n\n @staticmethod\n def get_recall_storage_connector(user_id, agent_id):\n config = MemGPTConfig.load()\n return StorageConnector.get_storage_connector(TableType.RECALL_MEMORY, config, user_id, agent_id)\n\n @abstractmethod\n def get_filters(self, filters: Optional[Dict] = {}):\n pass\n\n @abstractmethod\n def get_all_paginated(self, filters: Optional[Dict] = {}, page_size: Optional[int] = 1000) -> Iterator[List[Record]]:\n pass\n\n @abstractmethod\n def get_all(self, filters: Optional[Dict] = {}, limit=10) -> List[Record]:\n pass\n\n @abstractmethod\n def get(self, id: str) -> Optional[Record]:\n pass\n\n @abstractmethod\n def 
size(self, filters: Optional[Dict] = {}) -> int:\n pass\n\n @abstractmethod\n def insert(self, record: Record):\n pass\n\n @abstractmethod\n def insert_many(self, records: List[Record], show_progress=False):\n pass\n\n @abstractmethod\n def query(self, query: str, query_vec: List[float], top_k: int = 10, filters: Optional[Dict] = {}) -> List[Record]:\n pass\n\n @abstractmethod\n def query_date(self, start_date, end_date):\n pass\n\n @abstractmethod\n def query_text(self, query):\n pass\n\n @abstractmethod\n def delete_table(self):\n pass\n\n @abstractmethod\n def delete(self, filters: Optional[Dict] = {}):\n pass\n\n @abstractmethod\n def save(self):\n pass" }, { "identifier": "TableType", "path": "memgpt/agent_store/storage.py", "snippet": "class TableType:\n ARCHIVAL_MEMORY = \"archival_memory\" # recall memory table: memgpt_agent_{agent_id}\n RECALL_MEMORY = \"recall_memory\" # archival memory table: memgpt_agent_recall_{agent_id}\n PASSAGES = \"passages\" # TODO\n DOCUMENTS = \"documents\" # TODO" } ]
import builtins
import uuid
import questionary
import typer
import os
import shutil
from tqdm import tqdm
from prettytable import PrettyTable
from typing import Annotated
from enum import Enum
from memgpt.log import logger
from memgpt import utils
from memgpt.config import MemGPTConfig
from memgpt.credentials import MemGPTCredentials, SUPPORTED_AUTH_TYPES
from memgpt.constants import MEMGPT_DIR
from memgpt.constants import LLM_MAX_TOKENS
from memgpt.local_llm.constants import DEFAULT_ENDPOINTS, DEFAULT_OLLAMA_MODEL, DEFAULT_WRAPPER_NAME
from memgpt.local_llm.utils import get_available_wrappers
from memgpt.llm_api_tools import openai_get_model_list, azure_openai_get_model_list, smart_urljoin
from memgpt.server.utils import shorten_key_middle
from memgpt.data_types import User, LLMConfig, EmbeddingConfig
from memgpt.metadata import MetadataStore
from memgpt.agent_store.storage import StorageConnector, TableType
from memgpt.presets.presets import preset_options
13996
def get_azure_credentials(): creds = dict( azure_key=os.getenv("AZURE_OPENAI_KEY"), azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"), azure_version=os.getenv("AZURE_OPENAI_VERSION"), azure_deployment=os.getenv("AZURE_OPENAI_DEPLOYMENT"), azure_embedding_deployment=os.getenv("AZURE_OPENAI_EMBEDDING_DEPLOYMENT"), ) # embedding endpoint and version default to non-embedding creds["azure_embedding_endpoint"] = os.getenv("AZURE_OPENAI_EMBEDDING_ENDPOINT", creds["azure_endpoint"]) creds["azure_embedding_version"] = os.getenv("AZURE_OPENAI_EMBEDDING_VERSION", creds["azure_version"]) return creds def get_openai_credentials(): openai_key = os.getenv("OPENAI_API_KEY") return openai_key def configure_llm_endpoint(config: MemGPTConfig, credentials: MemGPTCredentials): # configure model endpoint model_endpoint_type, model_endpoint = None, None # get default default_model_endpoint_type = config.default_llm_config.model_endpoint_type if config.default_llm_config.model_endpoint_type is not None and config.default_llm_config.model_endpoint_type not in [ "openai", "azure", ]: # local model default_model_endpoint_type = "local" provider = questionary.select( "Select LLM inference provider:", choices=["openai", "azure", "local"], default=default_model_endpoint_type ).ask() if provider is None: raise KeyboardInterrupt # set: model_endpoint_type, model_endpoint if provider == "openai": # check for key if credentials.openai_key is None: # allow key to get pulled from env vars openai_api_key = os.getenv("OPENAI_API_KEY", None) # if we still can't find it, ask for it as input if openai_api_key is None: while openai_api_key is None or len(openai_api_key) == 0: # Ask for API key as input openai_api_key = questionary.password( "Enter your OpenAI API key (starts with 'sk-', see https://platform.openai.com/api-keys):" ).ask() if openai_api_key is None: raise KeyboardInterrupt credentials.openai_key = openai_api_key credentials.save() else: # Give the user an opportunity to overwrite the key openai_api_key = None default_input = ( shorten_key_middle(credentials.openai_key) if credentials.openai_key.startswith("sk-") else credentials.openai_key ) openai_api_key = questionary.password( "Enter your OpenAI API key (starts with 'sk-', see https://platform.openai.com/api-keys):", default=default_input, ).ask() if openai_api_key is None: raise KeyboardInterrupt # If the user modified it, use the new one if openai_api_key != default_input: credentials.openai_key = openai_api_key credentials.save() model_endpoint_type = "openai" model_endpoint = "https://api.openai.com/v1" model_endpoint = questionary.text("Override default endpoint:", default=model_endpoint).ask() if model_endpoint is None: raise KeyboardInterrupt provider = "openai" elif provider == "azure": # check for necessary vars azure_creds = get_azure_credentials() if not all([azure_creds["azure_key"], azure_creds["azure_endpoint"], azure_creds["azure_version"]]): raise ValueError( "Missing environment variables for Azure (see https://memgpt.readme.io/docs/endpoints#azure-openai). Please set then run `memgpt configure` again." 
) else: credentials.azure_key = azure_creds["azure_key"] credentials.azure_endpoint = azure_creds["azure_endpoint"] credentials.azure_version = azure_creds["azure_version"] config.save() model_endpoint_type = "azure" model_endpoint = azure_creds["azure_endpoint"] else: # local models backend_options = ["webui", "webui-legacy", "llamacpp", "koboldcpp", "ollama", "lmstudio", "lmstudio-legacy", "vllm", "openai"] default_model_endpoint_type = None if config.default_llm_config.model_endpoint_type in backend_options: # set from previous config default_model_endpoint_type = config.default_llm_config.model_endpoint_type model_endpoint_type = questionary.select( "Select LLM backend (select 'openai' if you have an OpenAI compatible proxy):", backend_options, default=default_model_endpoint_type, ).ask() if model_endpoint_type is None: raise KeyboardInterrupt # set default endpoint # if OPENAI_API_BASE is set, assume that this is the IP+port the user wanted to use default_model_endpoint = os.getenv("OPENAI_API_BASE") # if OPENAI_API_BASE is not set, try to pull a default IP+port format from a hardcoded set if default_model_endpoint is None: if model_endpoint_type in DEFAULT_ENDPOINTS: default_model_endpoint = DEFAULT_ENDPOINTS[model_endpoint_type] model_endpoint = questionary.text("Enter default endpoint:", default=default_model_endpoint).ask() if model_endpoint is None: raise KeyboardInterrupt
# from global logging configuration # from memgpt.cli import app # from memgpt.agent_store.storage import StorageConnector, TableType app = typer.Typer() def get_azure_credentials(): creds = dict( azure_key=os.getenv("AZURE_OPENAI_KEY"), azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"), azure_version=os.getenv("AZURE_OPENAI_VERSION"), azure_deployment=os.getenv("AZURE_OPENAI_DEPLOYMENT"), azure_embedding_deployment=os.getenv("AZURE_OPENAI_EMBEDDING_DEPLOYMENT"), ) # embedding endpoint and version default to non-embedding creds["azure_embedding_endpoint"] = os.getenv("AZURE_OPENAI_EMBEDDING_ENDPOINT", creds["azure_endpoint"]) creds["azure_embedding_version"] = os.getenv("AZURE_OPENAI_EMBEDDING_VERSION", creds["azure_version"]) return creds def get_openai_credentials(): openai_key = os.getenv("OPENAI_API_KEY") return openai_key def configure_llm_endpoint(config: MemGPTConfig, credentials: MemGPTCredentials): # configure model endpoint model_endpoint_type, model_endpoint = None, None # get default default_model_endpoint_type = config.default_llm_config.model_endpoint_type if config.default_llm_config.model_endpoint_type is not None and config.default_llm_config.model_endpoint_type not in [ "openai", "azure", ]: # local model default_model_endpoint_type = "local" provider = questionary.select( "Select LLM inference provider:", choices=["openai", "azure", "local"], default=default_model_endpoint_type ).ask() if provider is None: raise KeyboardInterrupt # set: model_endpoint_type, model_endpoint if provider == "openai": # check for key if credentials.openai_key is None: # allow key to get pulled from env vars openai_api_key = os.getenv("OPENAI_API_KEY", None) # if we still can't find it, ask for it as input if openai_api_key is None: while openai_api_key is None or len(openai_api_key) == 0: # Ask for API key as input openai_api_key = questionary.password( "Enter your OpenAI API key (starts with 'sk-', see https://platform.openai.com/api-keys):" ).ask() if openai_api_key is None: raise KeyboardInterrupt credentials.openai_key = openai_api_key credentials.save() else: # Give the user an opportunity to overwrite the key openai_api_key = None default_input = ( shorten_key_middle(credentials.openai_key) if credentials.openai_key.startswith("sk-") else credentials.openai_key ) openai_api_key = questionary.password( "Enter your OpenAI API key (starts with 'sk-', see https://platform.openai.com/api-keys):", default=default_input, ).ask() if openai_api_key is None: raise KeyboardInterrupt # If the user modified it, use the new one if openai_api_key != default_input: credentials.openai_key = openai_api_key credentials.save() model_endpoint_type = "openai" model_endpoint = "https://api.openai.com/v1" model_endpoint = questionary.text("Override default endpoint:", default=model_endpoint).ask() if model_endpoint is None: raise KeyboardInterrupt provider = "openai" elif provider == "azure": # check for necessary vars azure_creds = get_azure_credentials() if not all([azure_creds["azure_key"], azure_creds["azure_endpoint"], azure_creds["azure_version"]]): raise ValueError( "Missing environment variables for Azure (see https://memgpt.readme.io/docs/endpoints#azure-openai). Please set then run `memgpt configure` again." 
) else: credentials.azure_key = azure_creds["azure_key"] credentials.azure_endpoint = azure_creds["azure_endpoint"] credentials.azure_version = azure_creds["azure_version"] config.save() model_endpoint_type = "azure" model_endpoint = azure_creds["azure_endpoint"] else: # local models backend_options = ["webui", "webui-legacy", "llamacpp", "koboldcpp", "ollama", "lmstudio", "lmstudio-legacy", "vllm", "openai"] default_model_endpoint_type = None if config.default_llm_config.model_endpoint_type in backend_options: # set from previous config default_model_endpoint_type = config.default_llm_config.model_endpoint_type model_endpoint_type = questionary.select( "Select LLM backend (select 'openai' if you have an OpenAI compatible proxy):", backend_options, default=default_model_endpoint_type, ).ask() if model_endpoint_type is None: raise KeyboardInterrupt # set default endpoint # if OPENAI_API_BASE is set, assume that this is the IP+port the user wanted to use default_model_endpoint = os.getenv("OPENAI_API_BASE") # if OPENAI_API_BASE is not set, try to pull a default IP+port format from a hardcoded set if default_model_endpoint is None: if model_endpoint_type in DEFAULT_ENDPOINTS: default_model_endpoint = DEFAULT_ENDPOINTS[model_endpoint_type] model_endpoint = questionary.text("Enter default endpoint:", default=default_model_endpoint).ask() if model_endpoint is None: raise KeyboardInterrupt
while not utils.is_valid_url(model_endpoint):
1
2023-10-11 07:38:37+00:00
16k
xxlong0/Wonder3D
mvdiffusion/models/unet_mv2d_condition.py
[ { "identifier": "CrossAttnDownBlockMV2D", "path": "mvdiffusion/models/unet_mv2d_blocks.py", "snippet": "class CrossAttnDownBlockMV2D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n transformer_layers_per_block: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n num_attention_heads=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n downsample_padding=1,\n add_downsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n num_views: int = 1,\n cd_attention_last: bool = False,\n cd_attention_mid: bool = False,\n multiview_attention: bool = True,\n sparse_mv_attention: bool = False,\n mvcd_attention: bool=False\n ):\n super().__init__()\n resnets = []\n attentions = []\n\n self.has_cross_attention = True\n self.num_attention_heads = num_attention_heads\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n if not dual_cross_attention:\n attentions.append(\n TransformerMV2DModel(\n num_attention_heads,\n out_channels // num_attention_heads,\n in_channels=out_channels,\n num_layers=transformer_layers_per_block,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n num_views=num_views,\n cd_attention_last=cd_attention_last,\n cd_attention_mid=cd_attention_mid,\n multiview_attention=multiview_attention,\n sparse_mv_attention=sparse_mv_attention,\n mvcd_attention=mvcd_attention\n )\n )\n else:\n raise NotImplementedError\n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample2D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n additional_residuals=None,\n ):\n output_states = ()\n\n blocks = list(zip(self.resnets, self.attentions))\n\n for i, (resnet, attn) in enumerate(blocks):\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n ckpt_kwargs: Dict[str, Any] = {\"use_reentrant\": False} if is_torch_version(\">=\", \"1.11.0\") else {}\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(resnet),\n hidden_states,\n temb,\n **ckpt_kwargs,\n )\n 
hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n None, # timestep\n None, # class_labels\n cross_attention_kwargs,\n attention_mask,\n encoder_attention_mask,\n **ckpt_kwargs,\n )[0]\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n\n # apply additional residuals to the output of the last pair of resnet and attention blocks\n if i == len(blocks) - 1 and additional_residuals is not None:\n hidden_states = hidden_states + additional_residuals\n\n output_states = output_states + (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, output_states" }, { "identifier": "CrossAttnUpBlockMV2D", "path": "mvdiffusion/models/unet_mv2d_blocks.py", "snippet": "class CrossAttnUpBlockMV2D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n prev_output_channel: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n transformer_layers_per_block: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n num_attention_heads=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n add_upsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n num_views: int = 1,\n cd_attention_last: bool = False,\n cd_attention_mid: bool = False,\n multiview_attention: bool = True,\n sparse_mv_attention: bool = False,\n mvcd_attention: bool=False\n ):\n super().__init__()\n resnets = []\n attentions = []\n\n self.has_cross_attention = True\n self.num_attention_heads = num_attention_heads\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock2D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n if not dual_cross_attention:\n attentions.append(\n TransformerMV2DModel(\n num_attention_heads,\n out_channels // num_attention_heads,\n in_channels=out_channels,\n num_layers=transformer_layers_per_block,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n num_views=num_views,\n cd_attention_last=cd_attention_last,\n cd_attention_mid=cd_attention_mid,\n multiview_attention=multiview_attention,\n sparse_mv_attention=sparse_mv_attention,\n mvcd_attention=mvcd_attention\n )\n )\n else:\n raise NotImplementedError\n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, 
out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n upsample_size: Optional[int] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n for resnet, attn in zip(self.resnets, self.attentions):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n ckpt_kwargs: Dict[str, Any] = {\"use_reentrant\": False} if is_torch_version(\">=\", \"1.11.0\") else {}\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(resnet),\n hidden_states,\n temb,\n **ckpt_kwargs,\n )\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n None, # timestep\n None, # class_labels\n cross_attention_kwargs,\n attention_mask,\n encoder_attention_mask,\n **ckpt_kwargs,\n )[0]\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states" }, { "identifier": "UNetMidBlockMV2DCrossAttn", "path": "mvdiffusion/models/unet_mv2d_blocks.py", "snippet": "class UNetMidBlockMV2DCrossAttn(nn.Module):\n def __init__(\n self,\n in_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n transformer_layers_per_block: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n num_attention_heads=1,\n output_scale_factor=1.0,\n cross_attention_dim=1280,\n dual_cross_attention=False,\n use_linear_projection=False,\n upcast_attention=False,\n num_views: int = 1,\n cd_attention_last: bool = False,\n cd_attention_mid: bool = False,\n multiview_attention: bool = True,\n sparse_mv_attention: bool = False,\n mvcd_attention: bool=False\n ):\n super().__init__()\n\n self.has_cross_attention = True\n self.num_attention_heads = num_attention_heads\n resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)\n\n # there is always at least one resnet\n resnets = [\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n ]\n attentions = []\n\n for _ in range(num_layers):\n if not 
dual_cross_attention:\n attentions.append(\n TransformerMV2DModel(\n num_attention_heads,\n in_channels // num_attention_heads,\n in_channels=in_channels,\n num_layers=transformer_layers_per_block,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n num_views=num_views,\n cd_attention_last=cd_attention_last,\n cd_attention_mid=cd_attention_mid,\n multiview_attention=multiview_attention,\n sparse_mv_attention=sparse_mv_attention,\n mvcd_attention=mvcd_attention\n )\n )\n else:\n raise NotImplementedError\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n\n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ) -> torch.FloatTensor:\n hidden_states = self.resnets[0](hidden_states, temb)\n for attn, resnet in zip(self.attentions, self.resnets[1:]):\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n hidden_states = resnet(hidden_states, temb)\n\n return hidden_states" }, { "identifier": "get_down_block", "path": "mvdiffusion/models/unet_mv2d_blocks.py", "snippet": "def get_down_block(\n down_block_type,\n num_layers,\n in_channels,\n out_channels,\n temb_channels,\n add_downsample,\n resnet_eps,\n resnet_act_fn,\n transformer_layers_per_block=1,\n num_attention_heads=None,\n resnet_groups=None,\n cross_attention_dim=None,\n downsample_padding=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n resnet_skip_time_act=False,\n resnet_out_scale_factor=1.0,\n cross_attention_norm=None,\n attention_head_dim=None,\n downsample_type=None,\n num_views=1,\n cd_attention_last: bool = False,\n cd_attention_mid: bool = False,\n multiview_attention: bool = True,\n sparse_mv_attention: bool = False,\n mvcd_attention: bool=False\n):\n # If attn head dim is not defined, we default it to the number of heads\n if attention_head_dim is None:\n logger.warn(\n f\"It is recommended to provide `attention_head_dim` when calling `get_down_block`. 
Defaulting `attention_head_dim` to {num_attention_heads}.\"\n )\n attention_head_dim = num_attention_heads\n\n down_block_type = down_block_type[7:] if down_block_type.startswith(\"UNetRes\") else down_block_type\n if down_block_type == \"DownBlock2D\":\n return DownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"ResnetDownsampleBlock2D\":\n return ResnetDownsampleBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n output_scale_factor=resnet_out_scale_factor,\n )\n elif down_block_type == \"AttnDownBlock2D\":\n if add_downsample is False:\n downsample_type = None\n else:\n downsample_type = downsample_type or \"conv\" # default to 'conv'\n return AttnDownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n downsample_type=downsample_type,\n )\n elif down_block_type == \"CrossAttnDownBlock2D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnDownBlock2D\")\n return CrossAttnDownBlock2D(\n num_layers=num_layers,\n transformer_layers_per_block=transformer_layers_per_block,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=num_attention_heads,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n # custom MV2D attention block\n elif down_block_type == \"CrossAttnDownBlockMV2D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnDownBlockMV2D\")\n return CrossAttnDownBlockMV2D(\n num_layers=num_layers,\n transformer_layers_per_block=transformer_layers_per_block,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=num_attention_heads,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n num_views=num_views,\n cd_attention_last=cd_attention_last,\n cd_attention_mid=cd_attention_mid,\n multiview_attention=multiview_attention,\n 
sparse_mv_attention=sparse_mv_attention,\n mvcd_attention=mvcd_attention\n )\n elif down_block_type == \"SimpleCrossAttnDownBlock2D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for SimpleCrossAttnDownBlock2D\")\n return SimpleCrossAttnDownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n output_scale_factor=resnet_out_scale_factor,\n only_cross_attention=only_cross_attention,\n cross_attention_norm=cross_attention_norm,\n )\n elif down_block_type == \"SkipDownBlock2D\":\n return SkipDownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"AttnSkipDownBlock2D\":\n return AttnSkipDownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"DownEncoderBlock2D\":\n return DownEncoderBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"AttnDownEncoderBlock2D\":\n return AttnDownEncoderBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"KDownBlock2D\":\n return KDownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n )\n elif down_block_type == \"KCrossAttnDownBlock2D\":\n return KCrossAttnDownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n cross_attention_dim=cross_attention_dim,\n attention_head_dim=attention_head_dim,\n add_self_attention=True if not add_downsample else False,\n )\n raise ValueError(f\"{down_block_type} does not exist.\")" }, { "identifier": "get_up_block", "path": "mvdiffusion/models/unet_mv2d_blocks.py", "snippet": "def get_up_block(\n up_block_type,\n num_layers,\n in_channels,\n out_channels,\n prev_output_channel,\n temb_channels,\n add_upsample,\n resnet_eps,\n resnet_act_fn,\n transformer_layers_per_block=1,\n num_attention_heads=None,\n resnet_groups=None,\n cross_attention_dim=None,\n 
dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n resnet_skip_time_act=False,\n resnet_out_scale_factor=1.0,\n cross_attention_norm=None,\n attention_head_dim=None,\n upsample_type=None,\n num_views=1,\n cd_attention_last: bool = False,\n cd_attention_mid: bool = False,\n multiview_attention: bool = True,\n sparse_mv_attention: bool = False,\n mvcd_attention: bool=False\n):\n # If attn head dim is not defined, we default it to the number of heads\n if attention_head_dim is None:\n logger.warn(\n f\"It is recommended to provide `attention_head_dim` when calling `get_up_block`. Defaulting `attention_head_dim` to {num_attention_heads}.\"\n )\n attention_head_dim = num_attention_heads\n\n up_block_type = up_block_type[7:] if up_block_type.startswith(\"UNetRes\") else up_block_type\n if up_block_type == \"UpBlock2D\":\n return UpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif up_block_type == \"ResnetUpsampleBlock2D\":\n return ResnetUpsampleBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n output_scale_factor=resnet_out_scale_factor,\n )\n elif up_block_type == \"CrossAttnUpBlock2D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnUpBlock2D\")\n return CrossAttnUpBlock2D(\n num_layers=num_layers,\n transformer_layers_per_block=transformer_layers_per_block,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=num_attention_heads,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n # custom MV2D attention block\n elif up_block_type == \"CrossAttnUpBlockMV2D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnUpBlockMV2D\")\n return CrossAttnUpBlockMV2D(\n num_layers=num_layers,\n transformer_layers_per_block=transformer_layers_per_block,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=num_attention_heads,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n num_views=num_views,\n cd_attention_last=cd_attention_last,\n 
cd_attention_mid=cd_attention_mid,\n multiview_attention=multiview_attention,\n sparse_mv_attention=sparse_mv_attention,\n mvcd_attention=mvcd_attention\n ) \n elif up_block_type == \"SimpleCrossAttnUpBlock2D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for SimpleCrossAttnUpBlock2D\")\n return SimpleCrossAttnUpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n output_scale_factor=resnet_out_scale_factor,\n only_cross_attention=only_cross_attention,\n cross_attention_norm=cross_attention_norm,\n )\n elif up_block_type == \"AttnUpBlock2D\":\n if add_upsample is False:\n upsample_type = None\n else:\n upsample_type = upsample_type or \"conv\" # default to 'conv'\n\n return AttnUpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n upsample_type=upsample_type,\n )\n elif up_block_type == \"SkipUpBlock2D\":\n return SkipUpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif up_block_type == \"AttnSkipUpBlock2D\":\n return AttnSkipUpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif up_block_type == \"UpDecoderBlock2D\":\n return UpDecoderBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n temb_channels=temb_channels,\n )\n elif up_block_type == \"AttnUpDecoderBlock2D\":\n return AttnUpDecoderBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n temb_channels=temb_channels,\n )\n elif up_block_type == \"KUpBlock2D\":\n return KUpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n )\n elif up_block_type == \"KCrossAttnUpBlock2D\":\n return KCrossAttnUpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n 
resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n cross_attention_dim=cross_attention_dim,\n attention_head_dim=attention_head_dim,\n )\n\n raise ValueError(f\"{up_block_type} does not exist.\")" } ]
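The context snippets above end with the `get_down_block` / `get_up_block` factories, which dispatch purely on the block-type string, so the multiview blocks are selected simply by naming `CrossAttnDownBlockMV2D` / `CrossAttnUpBlockMV2D` in the UNet config. A rough usage sketch follows; the concrete channel, head, and view counts are illustrative assumptions, not values taken from the Wonder3D config.

# Sketch only: building one multiview down block through the factory shown above.
# All numeric arguments are assumed placeholder values.
down_block = get_down_block(
    "CrossAttnDownBlockMV2D",
    num_layers=2,
    in_channels=320,
    out_channels=320,
    temb_channels=1280,
    add_downsample=True,
    resnet_eps=1e-5,
    resnet_act_fn="silu",
    resnet_groups=32,
    downsample_padding=1,
    cross_attention_dim=1024,
    num_attention_heads=8,
    num_views=4,                 # number of views attended over jointly (assumed)
    multiview_attention=True,
)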
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union

from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.loaders import UNet2DConditionLoadersMixin
from diffusers.utils import BaseOutput, logging
from diffusers.models.activations import get_activation
from diffusers.models.attention_processor import AttentionProcessor, AttnProcessor
from diffusers.models.embeddings import (
    GaussianFourierProjection,
    ImageHintTimeEmbedding,
    ImageProjection,
    ImageTimeEmbedding,
    TextImageProjection,
    TextImageTimeEmbedding,
    TextTimeEmbedding,
    TimestepEmbedding,
    Timesteps,
)
from diffusers.models.modeling_utils import ModelMixin, load_state_dict, _load_state_dict_into_model
from diffusers.models.unet_2d_blocks import (
    CrossAttnDownBlock2D,
    CrossAttnUpBlock2D,
    DownBlock2D,
    UNetMidBlock2DCrossAttn,
    UNetMidBlock2DSimpleCrossAttn,
    UpBlock2D,
)
from diffusers.utils import (
    CONFIG_NAME,
    DIFFUSERS_CACHE,
    FLAX_WEIGHTS_NAME,
    HF_HUB_OFFLINE,
    SAFETENSORS_WEIGHTS_NAME,
    WEIGHTS_NAME,
    _add_variant,
    _get_model_file,
    deprecate,
    is_accelerate_available,
    is_safetensors_available,
    is_torch_version,
    logging,
)
from diffusers import __version__
from mvdiffusion.models.unet_mv2d_blocks import (
    CrossAttnDownBlockMV2D,
    CrossAttnUpBlockMV2D,
    UNetMidBlockMV2DCrossAttn,
    get_down_block,
    get_up_block,
)
import os
import torch
import torch.nn as nn
import torch.utils.checkpoint
import copy
11,063
def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "set_processor"): processors[f"{name}.processor"] = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ self.set_attn_processor(AttnProcessor()) def set_attention_slice(self, slice_size): r""" Enable sliced attention computation. When this option is enabled, the attention module splits the input tensor in slices to compute attention in several steps. This is useful for saving some memory in exchange for a small decrease in speed. Args: slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` must be a multiple of `slice_size`. 
""" sliceable_head_dims = [] def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): if hasattr(module, "set_attention_slice"): sliceable_head_dims.append(module.sliceable_head_dim) for child in module.children(): fn_recursive_retrieve_sliceable_dims(child) # retrieve number of attention layers for module in self.children(): fn_recursive_retrieve_sliceable_dims(module) num_sliceable_layers = len(sliceable_head_dims) if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory slice_size = [dim // 2 for dim in sliceable_head_dims] elif slice_size == "max": # make smallest slice possible slice_size = num_sliceable_layers * [1] slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size if len(slice_size) != len(sliceable_head_dims): raise ValueError( f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." ) for i in range(len(slice_size)): size = slice_size[i] dim = sliceable_head_dims[i] if size is not None and size > dim: raise ValueError(f"size {size} has to be smaller or equal to {dim}.") # Recursively walk through all the children. # Any children which exposes the set_attention_slice method # gets the message def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): if hasattr(module, "set_attention_slice"): module.set_attention_slice(slice_size.pop()) for child in module.children(): fn_recursive_set_attention_slice(child, slice_size) reversed_slice_size = list(reversed(slice_size)) for module in self.children(): fn_recursive_set_attention_slice(module, reversed_slice_size) def _set_gradient_checkpointing(self, module, value=False):
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class UNetMV2DConditionOutput(BaseOutput): """ The output of [`UNet2DConditionModel`]. Args: sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model. """ sample: torch.FloatTensor = None class UNetMV2DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin): r""" A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample shaped output. This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented for all models (such as downloading or saving). Parameters: sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`): Height and width of input/output sample. in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample. out_channels (`int`, *optional*, defaults to 4): Number of channels in the output. center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample. flip_sin_to_cos (`bool`, *optional*, defaults to `False`): Whether to flip the sin to cos in the time embedding. freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding. down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`): The tuple of downsample blocks to use. mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2DCrossAttn"`): Block type for middle of UNet, it can be either `UNetMidBlock2DCrossAttn` or `UNetMidBlock2DSimpleCrossAttn`. If `None`, the mid block layer is skipped. up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`): The tuple of upsample blocks to use. only_cross_attention(`bool` or `Tuple[bool]`, *optional*, default to `False`): Whether to include self-attention in the basic transformer blocks, see [`~models.attention.BasicTransformerBlock`]. block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): The tuple of output channels for each block. layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution. mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block. act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization. If `None`, normalization and activation layers is skipped in post-processing. norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization. 
cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280): The dimension of the cross attention features. transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1): The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`], [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`]. encoder_hid_dim (`int`, *optional*, defaults to None): If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim` dimension to `cross_attention_dim`. encoder_hid_dim_type (`str`, *optional*, defaults to `None`): If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text embeddings of dimension `cross_attention` according to `encoder_hid_dim_type`. attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads. num_attention_heads (`int`, *optional*): The number of attention heads. If not defined, defaults to `attention_head_dim` resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`. class_embed_type (`str`, *optional*, defaults to `None`): The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`, `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`. addition_embed_type (`str`, *optional*, defaults to `None`): Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or "text". "text" will use the `TextTimeEmbedding` layer. addition_time_embed_dim: (`int`, *optional*, defaults to `None`): Dimension for the timestep embeddings. num_class_embeds (`int`, *optional*, defaults to `None`): Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing class conditioning with `class_embed_type` equal to `None`. time_embedding_type (`str`, *optional*, defaults to `positional`): The type of position embedding to use for timesteps. Choose from `positional` or `fourier`. time_embedding_dim (`int`, *optional*, defaults to `None`): An optional override for the dimension of the projected time embedding. time_embedding_act_fn (`str`, *optional*, defaults to `None`): Optional activation function to use only once on the time embeddings before they are passed to the rest of the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`. timestep_post_act (`str`, *optional*, defaults to `None`): The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`. time_cond_proj_dim (`int`, *optional*, defaults to `None`): The dimension of `cond_proj` layer in the timestep embedding. conv_in_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_in` layer. conv_out_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_out` layer. projection_class_embeddings_input_dim (`int`, *optional*): The dimension of the `class_labels` input when `class_embed_type="projection"`. Required when `class_embed_type="projection"`. class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time embeddings with the class embeddings. mid_block_only_cross_attention (`bool`, *optional*, defaults to `None`): Whether to use cross attention with the mid block when using the `UNetMidBlock2DSimpleCrossAttn`. 
If `only_cross_attention` is given as a single boolean and `mid_block_only_cross_attention` is `None`, the `only_cross_attention` value is used as the value for `mid_block_only_cross_attention`. Default to `False` otherwise. """ _supports_gradient_checkpointing = True @register_to_config def __init__( self, sample_size: Optional[int] = None, in_channels: int = 4, out_channels: int = 4, center_input_sample: bool = False, flip_sin_to_cos: bool = True, freq_shift: int = 0, down_block_types: Tuple[str] = ( "CrossAttnDownBlockMV2D", "CrossAttnDownBlockMV2D", "CrossAttnDownBlockMV2D", "DownBlock2D", ), mid_block_type: Optional[str] = "UNetMidBlockMV2DCrossAttn", up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlockMV2D", "CrossAttnUpBlockMV2D", "CrossAttnUpBlockMV2D"), only_cross_attention: Union[bool, Tuple[bool]] = False, block_out_channels: Tuple[int] = (320, 640, 1280, 1280), layers_per_block: Union[int, Tuple[int]] = 2, downsample_padding: int = 1, mid_block_scale_factor: float = 1, act_fn: str = "silu", norm_num_groups: Optional[int] = 32, norm_eps: float = 1e-5, cross_attention_dim: Union[int, Tuple[int]] = 1280, transformer_layers_per_block: Union[int, Tuple[int]] = 1, encoder_hid_dim: Optional[int] = None, encoder_hid_dim_type: Optional[str] = None, attention_head_dim: Union[int, Tuple[int]] = 8, num_attention_heads: Optional[Union[int, Tuple[int]]] = None, dual_cross_attention: bool = False, use_linear_projection: bool = False, class_embed_type: Optional[str] = None, addition_embed_type: Optional[str] = None, addition_time_embed_dim: Optional[int] = None, num_class_embeds: Optional[int] = None, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", resnet_skip_time_act: bool = False, resnet_out_scale_factor: int = 1.0, time_embedding_type: str = "positional", time_embedding_dim: Optional[int] = None, time_embedding_act_fn: Optional[str] = None, timestep_post_act: Optional[str] = None, time_cond_proj_dim: Optional[int] = None, conv_in_kernel: int = 3, conv_out_kernel: int = 3, projection_class_embeddings_input_dim: Optional[int] = None, class_embeddings_concat: bool = False, mid_block_only_cross_attention: Optional[bool] = None, cross_attention_norm: Optional[str] = None, addition_embed_type_num_heads=64, num_views: int = 1, cd_attention_last: bool = False, cd_attention_mid: bool = False, multiview_attention: bool = True, sparse_mv_attention: bool = False, mvcd_attention: bool = False ): super().__init__() self.sample_size = sample_size if num_attention_heads is not None: raise ValueError( "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. 
num_attention_heads = num_attention_heads or attention_head_dim # Check inputs if len(down_block_types) != len(up_block_types): raise ValueError( f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." ) if len(block_out_channels) != len(down_block_types): raise ValueError( f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." ) if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): raise ValueError( f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}." ) if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): raise ValueError( f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." ) if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types): raise ValueError( f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}." ) if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types): raise ValueError( f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}." ) if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types): raise ValueError( f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}." ) # input conv_in_padding = (conv_in_kernel - 1) // 2 self.conv_in = nn.Conv2d( in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding ) # time if time_embedding_type == "fourier": time_embed_dim = time_embedding_dim or block_out_channels[0] * 2 if time_embed_dim % 2 != 0: raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.") self.time_proj = GaussianFourierProjection( time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos ) timestep_input_dim = time_embed_dim elif time_embedding_type == "positional": time_embed_dim = time_embedding_dim or block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) timestep_input_dim = block_out_channels[0] else: raise ValueError( f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`." ) self.time_embedding = TimestepEmbedding( timestep_input_dim, time_embed_dim, act_fn=act_fn, post_act_fn=timestep_post_act, cond_proj_dim=time_cond_proj_dim, ) if encoder_hid_dim_type is None and encoder_hid_dim is not None: encoder_hid_dim_type = "text_proj" self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type) logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.") if encoder_hid_dim is None and encoder_hid_dim_type is not None: raise ValueError( f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}." 
) if encoder_hid_dim_type == "text_proj": self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim) elif encoder_hid_dim_type == "text_image_proj": # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image_proj"` (Kadinsky 2.1)` self.encoder_hid_proj = TextImageProjection( text_embed_dim=encoder_hid_dim, image_embed_dim=cross_attention_dim, cross_attention_dim=cross_attention_dim, ) elif encoder_hid_dim_type == "image_proj": # Kandinsky 2.2 self.encoder_hid_proj = ImageProjection( image_embed_dim=encoder_hid_dim, cross_attention_dim=cross_attention_dim, ) elif encoder_hid_dim_type is not None: raise ValueError( f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'." ) else: self.encoder_hid_proj = None # class embedding if class_embed_type is None and num_class_embeds is not None: self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) elif class_embed_type == "timestep": self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn) elif class_embed_type == "identity": self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) elif class_embed_type == "projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set" ) # The projection `class_embed_type` is the same as the timestep `class_embed_type` except # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings # 2. it projects from an arbitrary input dimension. # # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations. # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings. # As a result, `TimestepEmbedding` can be passed arbitrary vectors. self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif class_embed_type == "simple_projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set" ) self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim) else: self.class_embedding = None if addition_embed_type == "text": if encoder_hid_dim is not None: text_time_embedding_from_dim = encoder_hid_dim else: text_time_embedding_from_dim = cross_attention_dim self.add_embedding = TextTimeEmbedding( text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads ) elif addition_embed_type == "text_image": # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. 
To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image"` (Kadinsky 2.1)` self.add_embedding = TextImageTimeEmbedding( text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim ) elif addition_embed_type == "text_time": self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift) self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif addition_embed_type == "image": # Kandinsky 2.2 self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type == "image_hint": # Kandinsky 2.2 ControlNet self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type is not None: raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.") if time_embedding_act_fn is None: self.time_embed_act = None else: self.time_embed_act = get_activation(time_embedding_act_fn) self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) if isinstance(only_cross_attention, bool): if mid_block_only_cross_attention is None: mid_block_only_cross_attention = only_cross_attention only_cross_attention = [only_cross_attention] * len(down_block_types) if mid_block_only_cross_attention is None: mid_block_only_cross_attention = False if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) if isinstance(attention_head_dim, int): attention_head_dim = (attention_head_dim,) * len(down_block_types) if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) * len(down_block_types) if isinstance(layers_per_block, int): layers_per_block = [layers_per_block] * len(down_block_types) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) if class_embeddings_concat: # The time embeddings are concatenated with the class embeddings. 
The dimension of the # time embeddings passed to the down, middle, and up blocks is twice the dimension of the # regular time embeddings blocks_time_embed_dim = time_embed_dim * 2 else: blocks_time_embed_dim = time_embed_dim # down output_channel = block_out_channels[0] for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block( down_block_type, num_layers=layers_per_block[i], transformer_layers_per_block=transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, temb_channels=blocks_time_embed_dim, add_downsample=not is_final_block, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim[i], num_attention_heads=num_attention_heads[i], downsample_padding=downsample_padding, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, resnet_skip_time_act=resnet_skip_time_act, resnet_out_scale_factor=resnet_out_scale_factor, cross_attention_norm=cross_attention_norm, attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, num_views=num_views, cd_attention_last=cd_attention_last, cd_attention_mid=cd_attention_mid, multiview_attention=multiview_attention, sparse_mv_attention=sparse_mv_attention, mvcd_attention=mvcd_attention ) self.down_blocks.append(down_block) # mid if mid_block_type == "UNetMidBlock2DCrossAttn": self.mid_block = UNetMidBlock2DCrossAttn( transformer_layers_per_block=transformer_layers_per_block[-1], in_channels=block_out_channels[-1], temb_channels=blocks_time_embed_dim, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, resnet_time_scale_shift=resnet_time_scale_shift, cross_attention_dim=cross_attention_dim[-1], num_attention_heads=num_attention_heads[-1], resnet_groups=norm_num_groups, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, ) # custom MV2D attention block elif mid_block_type == "UNetMidBlockMV2DCrossAttn": self.mid_block = UNetMidBlockMV2DCrossAttn( transformer_layers_per_block=transformer_layers_per_block[-1], in_channels=block_out_channels[-1], temb_channels=blocks_time_embed_dim, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, resnet_time_scale_shift=resnet_time_scale_shift, cross_attention_dim=cross_attention_dim[-1], num_attention_heads=num_attention_heads[-1], resnet_groups=norm_num_groups, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, num_views=num_views, cd_attention_last=cd_attention_last, cd_attention_mid=cd_attention_mid, multiview_attention=multiview_attention, sparse_mv_attention=sparse_mv_attention, mvcd_attention=mvcd_attention ) elif mid_block_type == "UNetMidBlock2DSimpleCrossAttn": self.mid_block = UNetMidBlock2DSimpleCrossAttn( in_channels=block_out_channels[-1], temb_channels=blocks_time_embed_dim, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, cross_attention_dim=cross_attention_dim[-1], attention_head_dim=attention_head_dim[-1], resnet_groups=norm_num_groups, resnet_time_scale_shift=resnet_time_scale_shift, skip_time_act=resnet_skip_time_act, 
only_cross_attention=mid_block_only_cross_attention, cross_attention_norm=cross_attention_norm, ) elif mid_block_type is None: self.mid_block = None else: raise ValueError(f"unknown mid_block_type : {mid_block_type}") # count how many layers upsample the images self.num_upsamplers = 0 # up reversed_block_out_channels = list(reversed(block_out_channels)) reversed_num_attention_heads = list(reversed(num_attention_heads)) reversed_layers_per_block = list(reversed(layers_per_block)) reversed_cross_attention_dim = list(reversed(cross_attention_dim)) reversed_transformer_layers_per_block = list(reversed(transformer_layers_per_block)) only_cross_attention = list(reversed(only_cross_attention)) output_channel = reversed_block_out_channels[0] for i, up_block_type in enumerate(up_block_types): is_final_block = i == len(block_out_channels) - 1 prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] # add upsample block for all BUT final layer if not is_final_block: add_upsample = True self.num_upsamplers += 1 else: add_upsample = False up_block = get_up_block( up_block_type, num_layers=reversed_layers_per_block[i] + 1, transformer_layers_per_block=reversed_transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, temb_channels=blocks_time_embed_dim, add_upsample=add_upsample, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=reversed_cross_attention_dim[i], num_attention_heads=reversed_num_attention_heads[i], dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, resnet_skip_time_act=resnet_skip_time_act, resnet_out_scale_factor=resnet_out_scale_factor, cross_attention_norm=cross_attention_norm, attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, num_views=num_views, cd_attention_last=cd_attention_last, cd_attention_mid=cd_attention_mid, multiview_attention=multiview_attention, sparse_mv_attention=sparse_mv_attention, mvcd_attention=mvcd_attention ) self.up_blocks.append(up_block) prev_output_channel = output_channel # out if norm_num_groups is not None: self.conv_norm_out = nn.GroupNorm( num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps ) self.conv_act = get_activation(act_fn) else: self.conv_norm_out = None self.conv_act = None conv_out_padding = (conv_out_kernel - 1) // 2 self.conv_out = nn.Conv2d( block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding ) @property def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. 
""" # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "set_processor"): processors[f"{name}.processor"] = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ self.set_attn_processor(AttnProcessor()) def set_attention_slice(self, slice_size): r""" Enable sliced attention computation. When this option is enabled, the attention module splits the input tensor in slices to compute attention in several steps. This is useful for saving some memory in exchange for a small decrease in speed. Args: slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` must be a multiple of `slice_size`. 
""" sliceable_head_dims = [] def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): if hasattr(module, "set_attention_slice"): sliceable_head_dims.append(module.sliceable_head_dim) for child in module.children(): fn_recursive_retrieve_sliceable_dims(child) # retrieve number of attention layers for module in self.children(): fn_recursive_retrieve_sliceable_dims(module) num_sliceable_layers = len(sliceable_head_dims) if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory slice_size = [dim // 2 for dim in sliceable_head_dims] elif slice_size == "max": # make smallest slice possible slice_size = num_sliceable_layers * [1] slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size if len(slice_size) != len(sliceable_head_dims): raise ValueError( f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." ) for i in range(len(slice_size)): size = slice_size[i] dim = sliceable_head_dims[i] if size is not None and size > dim: raise ValueError(f"size {size} has to be smaller or equal to {dim}.") # Recursively walk through all the children. # Any children which exposes the set_attention_slice method # gets the message def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): if hasattr(module, "set_attention_slice"): module.set_attention_slice(slice_size.pop()) for child in module.children(): fn_recursive_set_attention_slice(child, slice_size) reversed_slice_size = list(reversed(slice_size)) for module in self.children(): fn_recursive_set_attention_slice(module, reversed_slice_size) def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, (CrossAttnDownBlock2D, CrossAttnDownBlockMV2D, DownBlock2D, CrossAttnUpBlock2D, CrossAttnUpBlockMV2D, UpBlock2D)):
1
2023-10-14 12:18:38+00:00
16k
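The UNet record above reproduces the diffusers-style attn_processors property and set_attn_processor method, which recursively walk the module tree and read or swap the processor on every submodule that exposes set_processor. Below is a minimal, self-contained sketch of that traversal pattern; the Toy* classes are illustrative stand-ins, not classes from the record or from diffusers.

# Minimal sketch of the recursive processor get/set pattern shown in the record above.
# ToyProcessor / ToyAttention / ToyBlock / ToyUNet are illustrative stand-ins.
from typing import Dict
import torch.nn as nn


class ToyProcessor:
    def __call__(self, hidden_states):
        return hidden_states


class ToyAttention(nn.Module):
    def __init__(self):
        super().__init__()
        self.processor = ToyProcessor()

    def set_processor(self, processor):
        self.processor = processor


class ToyBlock(nn.Module):
    def __init__(self):
        super().__init__()
        self.attn1 = ToyAttention()
        self.attn2 = ToyAttention()


class ToyUNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.down = ToyBlock()
        self.up = ToyBlock()

    @property
    def attn_processors(self) -> Dict[str, ToyProcessor]:
        # Collect every processor, keyed by "<module path>.processor".
        processors: Dict[str, ToyProcessor] = {}

        def recurse(name: str, module: nn.Module):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor
            for sub_name, child in module.named_children():
                recurse(f"{name}.{sub_name}", child)

        for name, module in self.named_children():
            recurse(name, module)
        return processors

    def set_attn_processor(self, processor):
        # Accept either a single processor (applied everywhere) or a dict keyed
        # exactly like attn_processors.
        def recurse(name: str, module: nn.Module, processor):
            if hasattr(module, "set_processor"):
                module.set_processor(
                    processor if not isinstance(processor, dict)
                    else processor.pop(f"{name}.processor")
                )
            for sub_name, child in module.named_children():
                recurse(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            recurse(name, module, processor)


unet = ToyUNet()
unet.set_attn_processor(ToyProcessor())  # one processor instance for all attention layers
print(list(unet.attn_processors))        # ['down.attn1.processor', 'down.attn2.processor', ...]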
PixArt-alpha/PixArt-alpha
train_scripts/train_controlnet.py
[ { "identifier": "IDDPM", "path": "diffusion/iddpm.py", "snippet": "def IDDPM(\n timestep_respacing,\n noise_schedule=\"linear\",\n use_kl=False,\n sigma_small=False,\n predict_xstart=False,\n learn_sigma=True,\n pred_sigma=True,\n rescale_learned_sigmas=False,\n diffusion_steps=1000,\n snr=False,\n return_startx=False,\n):\n betas = gd.get_named_beta_schedule(noise_schedule, diffusion_steps)\n if use_kl:\n loss_type = gd.LossType.RESCALED_KL\n elif rescale_learned_sigmas:\n loss_type = gd.LossType.RESCALED_MSE\n else:\n loss_type = gd.LossType.MSE\n if timestep_respacing is None or timestep_respacing == \"\":\n timestep_respacing = [diffusion_steps]\n return SpacedDiffusion(\n use_timesteps=space_timesteps(diffusion_steps, timestep_respacing),\n betas=betas,\n model_mean_type=(\n gd.ModelMeanType.EPSILON if not predict_xstart else gd.ModelMeanType.START_X\n ),\n model_var_type=(\n ((\n gd.ModelVarType.FIXED_LARGE\n if not sigma_small\n else gd.ModelVarType.FIXED_SMALL\n )\n if not learn_sigma\n else gd.ModelVarType.LEARNED_RANGE\n )\n if pred_sigma\n else None\n ),\n loss_type=loss_type,\n snr=snr,\n return_startx=return_startx,\n # rescale_timesteps=rescale_timesteps,\n )" }, { "identifier": "build_dataset", "path": "diffusion/data/builder.py", "snippet": "def build_dataset(cfg, resolution=224, **kwargs):\n logger = get_root_logger()\n\n dataset_type = cfg.get('type')\n logger.info(f\"Constructing dataset {dataset_type}...\")\n t = time.time()\n transform = cfg.pop('transform', 'default_train')\n transform = get_transform(transform, resolution)\n dataset = build_from_cfg(cfg, DATASETS, default_args=dict(transform=transform, resolution=resolution, **kwargs))\n logger.info(f\"Dataset {dataset_type} constructed. time: {(time.time() - t):.2f} s, length (use/ori): {len(dataset)}/{dataset.ori_imgs_nums}\")\n return dataset" }, { "identifier": "build_dataloader", "path": "diffusion/data/builder.py", "snippet": "def build_dataloader(dataset, batch_size=256, num_workers=4, shuffle=True, **kwargs):\n if 'batch_sampler' in kwargs:\n dataloader = DataLoader(dataset, batch_sampler=kwargs['batch_sampler'], num_workers=num_workers, pin_memory=True)\n else:\n dataloader = DataLoader(dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n pin_memory=True,\n **kwargs)\n return dataloader" }, { "identifier": "set_data_root", "path": "diffusion/data/builder.py", "snippet": "def set_data_root(data_root):\n global DATA_ROOT\n DATA_ROOT = data_root" }, { "identifier": "build_model", "path": "diffusion/model/builder.py", "snippet": "def build_model(cfg, use_grad_checkpoint=False, use_fp32_attention=False, gc_step=1, **kwargs):\n if isinstance(cfg, str):\n cfg = dict(type=cfg)\n model = MODELS.build(cfg, default_args=kwargs)\n if use_grad_checkpoint:\n set_grad_checkpoint(model, use_fp32_attention=use_fp32_attention, gc_step=gc_step)\n return model" }, { "identifier": "PixArtMS", "path": "diffusion/model/nets/PixArtMS.py", "snippet": "class PixArtMS(PixArt):\n \"\"\"\n Diffusion model with a Transformer backbone.\n \"\"\"\n\n def __init__(\n self,\n input_size=32,\n patch_size=2,\n in_channels=4,\n hidden_size=1152,\n depth=28,\n num_heads=16,\n mlp_ratio=4.0,\n class_dropout_prob=0.1,\n learn_sigma=True,\n pred_sigma=True,\n drop_path: float = 0.,\n window_size=0,\n window_block_indexes=[],\n use_rel_pos=False,\n caption_channels=4096,\n lewei_scale=1.,\n config=None,\n model_max_length=120,\n **kwargs,\n ):\n super().__init__(\n input_size=input_size,\n patch_size=patch_size,\n 
in_channels=in_channels,\n hidden_size=hidden_size,\n depth=depth,\n num_heads=num_heads,\n mlp_ratio=mlp_ratio,\n class_dropout_prob=class_dropout_prob,\n learn_sigma=learn_sigma,\n pred_sigma=pred_sigma,\n drop_path=drop_path,\n window_size=window_size,\n window_block_indexes=window_block_indexes,\n use_rel_pos=use_rel_pos,\n lewei_scale=lewei_scale,\n config=config,\n model_max_length=model_max_length,\n **kwargs,\n )\n self.h = self.w = 0\n approx_gelu = lambda: nn.GELU(approximate=\"tanh\")\n self.t_block = nn.Sequential(\n nn.SiLU(),\n nn.Linear(hidden_size, 6 * hidden_size, bias=True)\n )\n self.x_embedder = PatchEmbed(patch_size, in_channels, hidden_size, bias=True)\n self.y_embedder = CaptionEmbedder(in_channels=caption_channels, hidden_size=hidden_size, uncond_prob=class_dropout_prob, act_layer=approx_gelu, token_num=model_max_length)\n self.csize_embedder = SizeEmbedder(hidden_size//3) # c_size embed\n self.ar_embedder = SizeEmbedder(hidden_size//3) # aspect ratio embed\n drop_path = [x.item() for x in torch.linspace(0, drop_path, depth)] # stochastic depth decay rule\n self.blocks = nn.ModuleList([\n PixArtMSBlock(hidden_size, num_heads, mlp_ratio=mlp_ratio, drop_path=drop_path[i],\n input_size=(input_size // patch_size, input_size // patch_size),\n window_size=window_size if i in window_block_indexes else 0,\n use_rel_pos=use_rel_pos if i in window_block_indexes else False)\n for i in range(depth)\n ])\n self.final_layer = T2IFinalLayer(hidden_size, patch_size, self.out_channels)\n\n self.initialize()\n\n def forward(self, x, timestep, y, mask=None, data_info=None, **kwargs):\n \"\"\"\n Forward pass of PixArt.\n x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)\n t: (N,) tensor of diffusion timesteps\n y: (N, 1, 120, C) tensor of class labels\n \"\"\"\n bs = x.shape[0]\n c_size, ar = data_info['img_hw'], data_info['aspect_ratio']\n self.h, self.w = x.shape[-2]//self.patch_size, x.shape[-1]//self.patch_size\n pos_embed = torch.from_numpy(get_2d_sincos_pos_embed(self.pos_embed.shape[-1], (self.h, self.w), lewei_scale=self.lewei_scale, base_size=self.base_size)).float().unsqueeze(0).to(x.device)\n x = self.x_embedder(x) + pos_embed # (N, T, D), where T = H * W / patch_size ** 2\n t = self.t_embedder(timestep) # (N, D)\n csize = self.csize_embedder(c_size, bs) # (N, D)\n ar = self.ar_embedder(ar, bs) # (N, D)\n t = t + torch.cat([csize, ar], dim=1)\n t0 = self.t_block(t)\n y = self.y_embedder(y, self.training) # (N, D)\n if mask is not None:\n if mask.shape[0] != y.shape[0]:\n mask = mask.repeat(y.shape[0] // mask.shape[0], 1)\n mask = mask.squeeze(1).squeeze(1)\n y = y.squeeze(1).masked_select(mask.unsqueeze(-1) != 0).view(1, -1, x.shape[-1])\n y_lens = mask.sum(dim=1).tolist()\n else:\n y_lens = [y.shape[2]] * y.shape[0]\n y = y.squeeze(1).view(1, -1, x.shape[-1])\n for block in self.blocks:\n x = auto_grad_checkpoint(block, x, y, t0, y_lens, **kwargs) # (N, T, D) #support grad checkpoint\n x = self.final_layer(x, t) # (N, T, patch_size ** 2 * out_channels)\n x = self.unpatchify(x) # (N, out_channels, H, W)\n return x\n\n def forward_with_dpmsolver(self, x, timestep, y, data_info, **kwargs):\n \"\"\"\n dpm solver donnot need variance prediction\n \"\"\"\n # https://github.com/openai/glide-text2im/blob/main/notebooks/text2im.ipynb\n model_out = self.forward(x, timestep, y, data_info=data_info, **kwargs)\n return model_out.chunk(2, dim=1)[0]\n\n def forward_with_cfg(self, x, timestep, y, cfg_scale, data_info, **kwargs):\n \"\"\"\n Forward pass 
of PixArt, but also batches the unconditional forward pass for classifier-free guidance.\n \"\"\"\n # https://github.com/openai/glide-text2im/blob/main/notebooks/text2im.ipynb\n half = x[: len(x) // 2]\n combined = torch.cat([half, half], dim=0)\n model_out = self.forward(combined, timestep, y, data_info=data_info)\n eps, rest = model_out[:, :3], model_out[:, 3:]\n cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)\n half_eps = uncond_eps + cfg_scale * (cond_eps - uncond_eps)\n eps = torch.cat([half_eps, half_eps], dim=0)\n return torch.cat([eps, rest], dim=1)\n\n def unpatchify(self, x):\n \"\"\"\n x: (N, T, patch_size**2 * C)\n imgs: (N, H, W, C)\n \"\"\"\n c = self.out_channels\n p = self.x_embedder.patch_size[0]\n assert self.h * self.w == x.shape[1]\n\n x = x.reshape(shape=(x.shape[0], self.h, self.w, p, p, c))\n x = torch.einsum('nhwpqc->nchpwq', x)\n imgs = x.reshape(shape=(x.shape[0], c, self.h * p, self.w * p))\n return imgs\n\n def initialize(self):\n # Initialize transformer layers:\n def _basic_init(module):\n if isinstance(module, nn.Linear):\n torch.nn.init.xavier_uniform_(module.weight)\n if module.bias is not None:\n nn.init.constant_(module.bias, 0)\n\n self.apply(_basic_init)\n\n # Initialize patch_embed like nn.Linear (instead of nn.Conv2d):\n w = self.x_embedder.proj.weight.data\n nn.init.xavier_uniform_(w.view([w.shape[0], -1]))\n\n # Initialize timestep embedding MLP:\n nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)\n nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)\n nn.init.normal_(self.t_block[1].weight, std=0.02)\n nn.init.normal_(self.csize_embedder.mlp[0].weight, std=0.02)\n nn.init.normal_(self.csize_embedder.mlp[2].weight, std=0.02)\n nn.init.normal_(self.ar_embedder.mlp[0].weight, std=0.02)\n nn.init.normal_(self.ar_embedder.mlp[2].weight, std=0.02)\n\n # Initialize caption embedding MLP:\n nn.init.normal_(self.y_embedder.y_proj.fc1.weight, std=0.02)\n nn.init.normal_(self.y_embedder.y_proj.fc2.weight, std=0.02)\n\n # Zero-out adaLN modulation layers in PixArt blocks:\n for block in self.blocks:\n nn.init.constant_(block.cross_attn.proj.weight, 0)\n nn.init.constant_(block.cross_attn.proj.bias, 0)\n\n # Zero-out output layers:\n nn.init.constant_(self.final_layer.linear.weight, 0)\n nn.init.constant_(self.final_layer.linear.bias, 0)" }, { "identifier": "ControlPixArtHalf", "path": "diffusion/model/nets/pixart_controlnet.py", "snippet": "class ControlPixArtHalf(Module):\n # only support single res model\n def __init__(self, base_model: PixArt, copy_blocks_num: int = 13) -> None:\n super().__init__()\n self.base_model = base_model.eval()\n self.controlnet = []\n self.copy_blocks_num = copy_blocks_num\n self.total_blocks_num = len(base_model.blocks)\n for p in self.base_model.parameters():\n p.requires_grad_(False)\n\n # Copy first copy_blocks_num block\n for i in range(copy_blocks_num):\n self.controlnet.append(ControlT2IDitBlockHalf(base_model.blocks[i], i))\n self.controlnet = nn.ModuleList(self.controlnet)\n \n def __getattr__(self, name: str) -> Tensor or Module:\n if name in ['forward', 'forward_with_dpmsolver', 'forward_with_cfg', 'forward_c', 'load_state_dict']:\n return self.__dict__[name]\n elif name in ['base_model', 'controlnet']:\n return super().__getattr__(name)\n else:\n return getattr(self.base_model, name)\n\n def forward_c(self, c):\n self.h, self.w = c.shape[-2]//self.patch_size, c.shape[-1]//self.patch_size\n pos_embed = torch.from_numpy(get_2d_sincos_pos_embed(self.pos_embed.shape[-1], (self.h, self.w), 
lewei_scale=self.lewei_scale, base_size=self.base_size)).unsqueeze(0).to(c.device).to(self.dtype)\n return self.x_embedder(c) + pos_embed if c is not None else c\n\n # def forward(self, x, t, c, **kwargs):\n # return self.base_model(x, t, c=self.forward_c(c), **kwargs)\n def forward(self, x, timestep, y, mask=None, data_info=None, c=None, **kwargs):\n # modify the original PixArtMS forward function\n if c is not None:\n c = c.to(self.dtype)\n c = self.forward_c(c)\n \"\"\"\n Forward pass of PixArt.\n x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)\n t: (N,) tensor of diffusion timesteps\n y: (N, 1, 120, C) tensor of class labels\n \"\"\"\n x = x.to(self.dtype)\n timestep = timestep.to(self.dtype)\n y = y.to(self.dtype)\n pos_embed = self.pos_embed.to(self.dtype)\n self.h, self.w = x.shape[-2]//self.patch_size, x.shape[-1]//self.patch_size\n x = self.x_embedder(x) + pos_embed # (N, T, D), where T = H * W / patch_size ** 2\n t = self.t_embedder(timestep.to(x.dtype)) # (N, D)\n t0 = self.t_block(t)\n y = self.y_embedder(y, self.training) # (N, 1, L, D)\n if mask is not None:\n if mask.shape[0] != y.shape[0]:\n mask = mask.repeat(y.shape[0] // mask.shape[0], 1)\n mask = mask.squeeze(1).squeeze(1)\n y = y.squeeze(1).masked_select(mask.unsqueeze(-1) != 0).view(1, -1, x.shape[-1])\n y_lens = mask.sum(dim=1).tolist()\n else:\n y_lens = [y.shape[2]] * y.shape[0]\n y = y.squeeze(1).view(1, -1, x.shape[-1])\n\n # define the first layer\n x = auto_grad_checkpoint(self.base_model.blocks[0], x, y, t0, y_lens, **kwargs) # (N, T, D) #support grad checkpoint\n\n if c is not None:\n # update c\n for index in range(1, self.copy_blocks_num + 1):\n c, c_skip = auto_grad_checkpoint(self.controlnet[index - 1], x, y, t0, y_lens, c, **kwargs)\n x = auto_grad_checkpoint(self.base_model.blocks[index], x + c_skip, y, t0, y_lens, **kwargs)\n \n # update x\n for index in range(self.copy_blocks_num + 1, self.total_blocks_num):\n x = auto_grad_checkpoint(self.base_model.blocks[index], x, y, t0, y_lens, **kwargs)\n else:\n for index in range(1, self.total_blocks_num):\n x = auto_grad_checkpoint(self.base_model.blocks[index], x, y, t0, y_lens, **kwargs)\n\n x = self.final_layer(x, t) # (N, T, patch_size ** 2 * out_channels)\n x = self.unpatchify(x) # (N, out_channels, H, W)\n return x\n\n def forward_with_dpmsolver(self, x, t, y, data_info, c, **kwargs):\n model_out = self.forward(x, t, y, data_info=data_info, c=c, **kwargs)\n return model_out.chunk(2, dim=1)[0]\n\n # def forward_with_dpmsolver(self, x, t, y, data_info, c, **kwargs):\n # return self.base_model.forward_with_dpmsolver(x, t, y, data_info=data_info, c=self.forward_c(c), **kwargs)\n\n def forward_with_cfg(self, x, t, y, cfg_scale, data_info, c, **kwargs):\n return self.base_model.forward_with_cfg(x, t, y, cfg_scale, data_info, c=self.forward_c(c), **kwargs)\n\n def load_state_dict(self, state_dict: Mapping[str, Any], strict: bool = True):\n if all((k.startswith('base_model') or k.startswith('controlnet')) for k in state_dict.keys()):\n return super().load_state_dict(state_dict, strict)\n else:\n new_key = {}\n for k in state_dict.keys():\n new_key[k] = re.sub(r\"(blocks\\.\\d+)(.*)\", r\"\\1.base_block\\2\", k)\n for k, v in new_key.items():\n if k != v:\n print(f\"replace {k} to {v}\")\n state_dict[v] = state_dict.pop(k)\n\n return self.base_model.load_state_dict(state_dict, strict)\n \n def unpatchify(self, x):\n \"\"\"\n x: (N, T, patch_size**2 * C)\n imgs: (N, H, W, C)\n \"\"\"\n c = self.out_channels\n p = 
self.x_embedder.patch_size[0]\n assert self.h * self.w == x.shape[1]\n\n x = x.reshape(shape=(x.shape[0], self.h, self.w, p, p, c))\n x = torch.einsum('nhwpqc->nchpwq', x)\n imgs = x.reshape(shape=(x.shape[0], c, self.h * p, self.w * p))\n return imgs\n\n @property\n def dtype(self):\n # 返回模型参数的数据类型\n return next(self.parameters()).dtype" }, { "identifier": "ControlPixArtMSHalf", "path": "diffusion/model/nets/pixart_controlnet.py", "snippet": "class ControlPixArtMSHalf(ControlPixArtHalf):\n # support multi-scale res model (multi-scale model can also be applied to single reso training & inference)\n def __init__(self, base_model: PixArtMS, copy_blocks_num: int = 13) -> None:\n super().__init__(base_model=base_model, copy_blocks_num=copy_blocks_num)\n\n def forward(self, x, timestep, y, mask=None, data_info=None, c=None, **kwargs):\n # modify the original PixArtMS forward function\n \"\"\"\n Forward pass of PixArt.\n x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)\n t: (N,) tensor of diffusion timesteps\n y: (N, 1, 120, C) tensor of class labels\n \"\"\"\n if c is not None:\n c = c.to(self.dtype)\n c = self.forward_c(c)\n bs = x.shape[0]\n x = x.to(self.dtype)\n timestep = timestep.to(self.dtype)\n y = y.to(self.dtype)\n c_size, ar = data_info['img_hw'].to(self.dtype), data_info['aspect_ratio'].to(self.dtype)\n self.h, self.w = x.shape[-2]//self.patch_size, x.shape[-1]//self.patch_size\n\n pos_embed = torch.from_numpy(get_2d_sincos_pos_embed(self.pos_embed.shape[-1], (self.h, self.w), lewei_scale=self.lewei_scale, base_size=self.base_size)).unsqueeze(0).to(x.device).to(self.dtype)\n x = self.x_embedder(x) + pos_embed # (N, T, D), where T = H * W / patch_size ** 2\n t = self.t_embedder(timestep) # (N, D)\n csize = self.csize_embedder(c_size, bs) # (N, D)\n ar = self.ar_embedder(ar, bs) # (N, D)\n t = t + torch.cat([csize, ar], dim=1)\n t0 = self.t_block(t)\n y = self.y_embedder(y, self.training) # (N, D)\n if mask is not None:\n if mask.shape[0] != y.shape[0]:\n mask = mask.repeat(y.shape[0] // mask.shape[0], 1)\n mask = mask.squeeze(1).squeeze(1)\n y = y.squeeze(1).masked_select(mask.unsqueeze(-1) != 0).view(1, -1, x.shape[-1])\n y_lens = mask.sum(dim=1).tolist()\n else:\n y_lens = [y.shape[2]] * y.shape[0]\n y = y.squeeze(1).view(1, -1, x.shape[-1])\n\n # define the first layer\n x = auto_grad_checkpoint(self.base_model.blocks[0], x, y, t0, y_lens, **kwargs) # (N, T, D) #support grad checkpoint\n\n if c is not None:\n # update c\n for index in range(1, self.copy_blocks_num + 1):\n c, c_skip = auto_grad_checkpoint(self.controlnet[index - 1], x, y, t0, y_lens, c, **kwargs)\n x = auto_grad_checkpoint(self.base_model.blocks[index], x + c_skip, y, t0, y_lens, **kwargs)\n \n # update x\n for index in range(self.copy_blocks_num + 1, self.total_blocks_num):\n x = auto_grad_checkpoint(self.base_model.blocks[index], x, y, t0, y_lens, **kwargs)\n else:\n for index in range(1, self.total_blocks_num):\n x = auto_grad_checkpoint(self.base_model.blocks[index], x, y, t0, y_lens, **kwargs)\n\n x = self.final_layer(x, t) # (N, T, patch_size ** 2 * out_channels)\n x = self.unpatchify(x) # (N, out_channels, H, W)\n return x" }, { "identifier": "save_checkpoint", "path": "diffusion/utils/checkpoint.py", "snippet": "def save_checkpoint(work_dir,\n epoch,\n model,\n model_ema=None,\n optimizer=None,\n lr_scheduler=None,\n keep_last=False,\n step=None,\n ):\n os.makedirs(work_dir, exist_ok=True)\n state_dict = dict(state_dict=model.state_dict())\n if model_ema is not None:\n 
state_dict['state_dict_ema'] = model_ema.state_dict()\n if optimizer is not None:\n state_dict['optimizer'] = optimizer.state_dict()\n if lr_scheduler is not None:\n state_dict['scheduler'] = lr_scheduler.state_dict()\n if epoch is not None:\n state_dict['epoch'] = epoch\n file_path = os.path.join(work_dir, f\"epoch_{epoch}.pth\")\n if step is not None:\n file_path = file_path.split('.pth')[0] + f\"_step_{step}.pth\"\n logger = get_root_logger()\n torch.save(state_dict, file_path)\n logger.info(f'Saved checkpoint of epoch {epoch} to {file_path.format(epoch)}.')\n if keep_last:\n for i in range(epoch):\n previous_ckgt = file_path.format(i)\n if os.path.exists(previous_ckgt):\n os.remove(previous_ckgt)" }, { "identifier": "load_checkpoint", "path": "diffusion/utils/checkpoint.py", "snippet": "def load_checkpoint(checkpoint,\n model,\n model_ema=None,\n optimizer=None,\n lr_scheduler=None,\n load_ema=False,\n resume_optimizer=True,\n resume_lr_scheduler=True\n ):\n assert isinstance(checkpoint, str)\n ckpt_file = checkpoint\n checkpoint = torch.load(ckpt_file, map_location=\"cpu\")\n\n state_dict_keys = ['pos_embed', 'base_model.pos_embed', 'model.pos_embed']\n for key in state_dict_keys:\n if key in checkpoint['state_dict']:\n del checkpoint['state_dict'][key]\n if 'state_dict_ema' in checkpoint and key in checkpoint['state_dict_ema']:\n del checkpoint['state_dict_ema'][key]\n break\n\n if load_ema:\n state_dict = checkpoint['state_dict_ema']\n else:\n state_dict = checkpoint.get('state_dict', checkpoint) # to be compatible with the official checkpoint\n # model.load_state_dict(state_dict)\n missing, unexpect = model.load_state_dict(state_dict, strict=False)\n if model_ema is not None:\n model_ema.load_state_dict(checkpoint['state_dict_ema'], strict=False)\n if optimizer is not None and resume_optimizer:\n optimizer.load_state_dict(checkpoint['optimizer'])\n if lr_scheduler is not None and resume_lr_scheduler:\n lr_scheduler.load_state_dict(checkpoint['scheduler'])\n logger = get_root_logger()\n if optimizer is not None:\n epoch = checkpoint.get('epoch', re.match(r'.*epoch_(\\d*).*.pth', ckpt_file).group()[0])\n logger.info(f'Resume checkpoint of epoch {epoch} from {ckpt_file}. Load ema: {load_ema}, '\n f'resume optimizer: {resume_optimizer}, resume lr scheduler: {resume_lr_scheduler}.')\n return epoch, missing, unexpect\n logger.info(f'Load checkpoint from {ckpt_file}. 
Load ema: {load_ema}.')\n return missing, unexpect" }, { "identifier": "AspectRatioBatchSampler", "path": "diffusion/utils/data_sampler.py", "snippet": "class AspectRatioBatchSampler(BatchSampler):\n \"\"\"A sampler wrapper for grouping images with similar aspect ratio into a same batch.\n\n Args:\n sampler (Sampler): Base sampler.\n dataset (Dataset): Dataset providing data information.\n batch_size (int): Size of mini-batch.\n drop_last (bool): If ``True``, the sampler will drop the last batch if\n its size would be less than ``batch_size``.\n aspect_ratios (dict): The predefined aspect ratios.\n \"\"\"\n\n def __init__(self,\n sampler: Sampler,\n dataset: Dataset,\n batch_size: int,\n aspect_ratios: dict,\n drop_last: bool = False,\n config=None,\n valid_num=0, # take as valid aspect-ratio when sample number >= valid_num\n **kwargs) -> None:\n if not isinstance(sampler, Sampler):\n raise TypeError('sampler should be an instance of ``Sampler``, '\n f'but got {sampler}')\n if not isinstance(batch_size, int) or batch_size <= 0:\n raise ValueError('batch_size should be a positive integer value, '\n f'but got batch_size={batch_size}')\n self.sampler = sampler\n self.dataset = dataset\n self.batch_size = batch_size\n self.aspect_ratios = aspect_ratios\n self.drop_last = drop_last\n self.ratio_nums_gt = kwargs.get('ratio_nums', None)\n self.config = config\n assert self.ratio_nums_gt\n # buckets for each aspect ratio\n self._aspect_ratio_buckets = {ratio: [] for ratio in aspect_ratios.keys()}\n self.current_available_bucket_keys = [str(k) for k, v in self.ratio_nums_gt.items() if v >= valid_num]\n logger = get_root_logger() if config is None else get_root_logger(os.path.join(config.work_dir, 'train_log.log'))\n logger.warning(f\"Using valid_num={valid_num} in config file. 
Available {len(self.current_available_bucket_keys)} aspect_ratios: {self.current_available_bucket_keys}\")\n\n def __iter__(self) -> Sequence[int]:\n for idx in self.sampler:\n data_info = self.dataset.get_data_info(idx)\n height, width = data_info['height'], data_info['width']\n ratio = height / width\n # find the closest aspect ratio\n closest_ratio = min(self.aspect_ratios.keys(), key=lambda r: abs(float(r) - ratio))\n if closest_ratio not in self.current_available_bucket_keys:\n continue\n bucket = self._aspect_ratio_buckets[closest_ratio]\n bucket.append(idx)\n # yield a batch of indices in the same aspect ratio group\n if len(bucket) == self.batch_size:\n yield bucket[:]\n del bucket[:]\n\n # yield the rest data and reset the buckets\n for bucket in self._aspect_ratio_buckets.values():\n while len(bucket) > 0:\n if len(bucket) <= self.batch_size:\n if not self.drop_last:\n yield bucket[:]\n bucket = []\n else:\n yield bucket[:self.batch_size]\n bucket = bucket[self.batch_size:]" }, { "identifier": "BalancedAspectRatioBatchSampler", "path": "diffusion/utils/data_sampler.py", "snippet": "class BalancedAspectRatioBatchSampler(AspectRatioBatchSampler):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # Assign samples to each bucket\n self.ratio_nums_gt = kwargs.get('ratio_nums', None)\n assert self.ratio_nums_gt\n self._aspect_ratio_buckets = {float(ratio): [] for ratio in self.aspect_ratios.keys()}\n self.original_buckets = {}\n self.current_available_bucket_keys = [k for k, v in self.ratio_nums_gt.items() if v >= 3000]\n self.all_available_keys = deepcopy(self.current_available_bucket_keys)\n self.exhausted_bucket_keys = []\n self.total_batches = len(self.sampler) // self.batch_size\n self._aspect_ratio_count = {}\n for k in self.all_available_keys:\n self._aspect_ratio_count[float(k)] = 0\n self.original_buckets[float(k)] = []\n logger = get_root_logger(os.path.join(self.config.work_dir, 'train_log.log'))\n logger.warning(f\"Available {len(self.current_available_bucket_keys)} aspect_ratios: {self.current_available_bucket_keys}\")\n\n def __iter__(self) -> Sequence[int]:\n i = 0\n for idx in self.sampler:\n data_info = self.dataset.get_data_info(idx)\n height, width = data_info['height'], data_info['width']\n ratio = height / width\n closest_ratio = float(min(self.aspect_ratios.keys(), key=lambda r: abs(float(r) - ratio)))\n if closest_ratio not in self.all_available_keys:\n continue\n if self._aspect_ratio_count[closest_ratio] < self.ratio_nums_gt[closest_ratio]:\n self._aspect_ratio_count[closest_ratio] += 1\n self._aspect_ratio_buckets[closest_ratio].append(idx)\n self.original_buckets[closest_ratio].append(idx) # Save the original samples for each bucket\n if not self.current_available_bucket_keys:\n self.current_available_bucket_keys, self.exhausted_bucket_keys = self.exhausted_bucket_keys, []\n\n if closest_ratio not in self.current_available_bucket_keys:\n continue\n key = closest_ratio\n bucket = self._aspect_ratio_buckets[key]\n if len(bucket) == self.batch_size:\n yield bucket[:self.batch_size]\n del bucket[:self.batch_size]\n i += 1\n self.exhausted_bucket_keys.append(key)\n self.current_available_bucket_keys.remove(key)\n\n for _ in range(self.total_batches - i):\n key = choice(self.all_available_keys)\n bucket = self._aspect_ratio_buckets[key]\n if len(bucket) >= self.batch_size:\n yield bucket[:self.batch_size]\n del bucket[:self.batch_size]\n\n # If a bucket is exhausted\n if not bucket:\n self._aspect_ratio_buckets[key] = 
deepcopy(self.original_buckets[key][:])\n shuffle(self._aspect_ratio_buckets[key])\n else:\n self._aspect_ratio_buckets[key] = deepcopy(self.original_buckets[key][:])\n shuffle(self._aspect_ratio_buckets[key])" }, { "identifier": "synchronize", "path": "diffusion/utils/dist_utils.py", "snippet": "def synchronize():\n \"\"\"\n Helper function to synchronize (barrier) among all processes when\n using distributed training\n \"\"\"\n if not dist.is_available():\n return\n if not dist.is_initialized():\n return\n world_size = dist.get_world_size()\n if world_size == 1:\n return\n dist.barrier()" }, { "identifier": "get_world_size", "path": "diffusion/utils/dist_utils.py", "snippet": "def get_world_size():\n if not dist.is_available():\n return 1\n if not dist.is_initialized():\n return 1\n return dist.get_world_size()" }, { "identifier": "clip_grad_norm_", "path": "diffusion/utils/dist_utils.py", "snippet": "@torch.no_grad()\ndef clip_grad_norm_(\n self, max_norm: Union[float, int], norm_type: Union[float, int] = 2.0\n) -> None:\n self._lazy_init()\n self._wait_for_previous_optim_step()\n assert self._is_root, \"clip_grad_norm should only be called on the root (parent) instance\"\n self._assert_state(TrainingState_.IDLE)\n\n max_norm = float(max_norm)\n norm_type = float(norm_type)\n # Computes the max norm for this shard's gradients and sync's across workers\n local_norm = _calc_grad_norm(self.params_with_grad, norm_type).cuda() # type: ignore[arg-type]\n if norm_type == math.inf:\n total_norm = local_norm\n dist.all_reduce(total_norm, op=torch.distributed.ReduceOp.MAX, group=self.process_group)\n else:\n total_norm = local_norm ** norm_type\n dist.all_reduce(total_norm, group=self.process_group)\n total_norm = total_norm ** (1.0 / norm_type)\n\n clip_coef = torch.tensor(max_norm, dtype=total_norm.dtype, device=total_norm.device) / (total_norm + 1e-6)\n if clip_coef < 1:\n # multiply by clip_coef, aka, (max_norm/total_norm).\n for p in self.params_with_grad:\n assert p.grad is not None\n p.grad.detach().mul_(clip_coef.to(p.grad.device))\n return total_norm" }, { "identifier": "get_root_logger", "path": "diffusion/utils/logger.py", "snippet": "def get_root_logger(log_file=None, log_level=logging.INFO, name='PixArt'):\n \"\"\"Get root logger.\n\n Args:\n log_file (str, optional): File path of log. 
Defaults to None.\n log_level (int, optional): The level of logger.\n Defaults to logging.INFO.\n name (str): logger name\n Returns:\n :obj:`logging.Logger`: The obtained logger\n \"\"\"\n if log_file is None:\n log_file = '/dev/null'\n logger = get_logger(name=name, log_file=log_file, log_level=log_level)\n return logger" }, { "identifier": "build_lr_scheduler", "path": "diffusion/utils/lr_scheduler.py", "snippet": "def build_lr_scheduler(config, optimizer, train_dataloader, lr_scale_ratio):\n if not config.get('lr_schedule_args', None):\n config.lr_schedule_args = dict()\n if config.get('lr_warmup_steps', None):\n config['num_warmup_steps'] = config.get('lr_warmup_steps') # for compatibility with old version\n\n logger = get_root_logger()\n logger.info(\n f'Lr schedule: {config.lr_schedule}, ' + \",\".join(\n [f\"{key}:{value}\" for key, value in config.lr_schedule_args.items()]) + '.')\n if config.lr_schedule == 'cosine':\n lr_scheduler = get_cosine_schedule_with_warmup(\n optimizer=optimizer,\n **config.lr_schedule_args,\n num_training_steps=(len(train_dataloader) * config.num_epochs),\n )\n elif config.lr_schedule == 'constant':\n lr_scheduler = get_constant_schedule_with_warmup(\n optimizer=optimizer,\n **config.lr_schedule_args,\n )\n elif config.lr_schedule == 'cosine_decay_to_constant':\n assert lr_scale_ratio >= 1\n lr_scheduler = get_cosine_decay_to_constant_with_warmup(\n optimizer=optimizer,\n **config.lr_schedule_args,\n final_lr=1 / lr_scale_ratio,\n num_training_steps=(len(train_dataloader) * config.num_epochs),\n )\n else:\n raise RuntimeError(f'Unrecognized lr schedule {config.lr_schedule}.')\n return lr_scheduler" }, { "identifier": "set_random_seed", "path": "diffusion/utils/misc.py", "snippet": "def set_random_seed(seed, deterministic=False):\n \"\"\"Set random seed.\n\n Args:\n seed (int): Seed to be used.\n deterministic (bool): Whether to set the deterministic option for\n CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`\n to True and `torch.backends.cudnn.benchmark` to False.\n Default: False.\n \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n if deterministic:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False" }, { "identifier": "read_config", "path": "diffusion/utils/misc.py", "snippet": "def read_config(file):\n # solve config loading conflict when multi-processes\n import time\n while True:\n config = Config.fromfile(file)\n if len(config) == 0:\n time.sleep(0.1)\n continue\n break\n return config" }, { "identifier": "init_random_seed", "path": "diffusion/utils/misc.py", "snippet": "def init_random_seed(seed=None, device='cuda'):\n \"\"\"Initialize random seed.\n\n If the seed is not set, the seed will be automatically randomized,\n and then broadcast to all processes to prevent some potential bugs.\n\n Args:\n seed (int, Optional): The seed. Default to None.\n device (str): The device where the seed will be put on.\n Default to 'cuda'.\n\n Returns:\n int: Seed to be used.\n \"\"\"\n if seed is not None:\n return seed\n\n # Make sure all ranks share the same random seed to prevent\n # some potential bugs. 
Please refer to\n # https://github.com/open-mmlab/mmdetection/issues/6339\n rank, world_size = get_dist_info()\n seed = np.random.randint(2 ** 31)\n if world_size == 1:\n return seed\n\n if rank == 0:\n random_num = torch.tensor(seed, dtype=torch.int32, device=device)\n else:\n random_num = torch.tensor(0, dtype=torch.int32, device=device)\n dist.broadcast(random_num, src=0)\n return random_num.item()" }, { "identifier": "DebugUnderflowOverflow", "path": "diffusion/utils/misc.py", "snippet": "class DebugUnderflowOverflow:\n \"\"\"\n This debug class helps detect and understand where the model starts getting very large or very small, and more\n importantly `nan` or `inf` weight and activation elements.\n There are 2 working modes:\n 1. Underflow/overflow detection (default)\n 2. Specific batch absolute min/max tracing without detection\n Mode 1: Underflow/overflow detection\n To activate the underflow/overflow detection, initialize the object with the model :\n ```python\n debug_overflow = DebugUnderflowOverflow(model)\n ```\n then run the training as normal and if `nan` or `inf` gets detected in at least one of the weight, input or\n output elements this module will throw an exception and will print `max_frames_to_save` frames that lead to this\n event, each frame reporting\n 1. the fully qualified module name plus the class name whose `forward` was run\n 2. the absolute min and max value of all elements for each module weights, and the inputs and output\n For example, here is the header and the last few frames in detection report for `google/mt5-small` run in fp16 mixed precision :\n ```\n Detected inf/nan during batch_number=0\n Last 21 forward frames:\n abs min abs max metadata\n [...]\n encoder.block.2.layer.1.DenseReluDense.wi_0 Linear\n 2.17e-07 4.50e+00 weight\n 1.79e-06 4.65e+00 input[0]\n 2.68e-06 3.70e+01 output\n encoder.block.2.layer.1.DenseReluDense.wi_1 Linear\n 8.08e-07 2.66e+01 weight\n 1.79e-06 4.65e+00 input[0]\n 1.27e-04 2.37e+02 output\n encoder.block.2.layer.1.DenseReluDense.wo Linear\n 1.01e-06 6.44e+00 weight\n 0.00e+00 9.74e+03 input[0]\n 3.18e-04 6.27e+04 output\n encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense\n 1.79e-06 4.65e+00 input[0]\n 3.18e-04 6.27e+04 output\n encoder.block.2.layer.1.dropout Dropout\n 3.18e-04 6.27e+04 input[0]\n 0.00e+00 inf output\n ```\n You can see here, that `T5DenseGatedGeluDense.forward` resulted in output activations, whose absolute max value\n was around 62.7K, which is very close to fp16's top limit of 64K. In the next frame we have `Dropout` which\n renormalizes the weights, after it zeroed some of the elements, which pushes the absolute max value to more than\n 64K, and we get an overlow.\n As you can see it's the previous frames that we need to look into when the numbers start going into very large for\n fp16 numbers.\n The tracking is done in a forward hook, which gets invoked immediately after `forward` has completed.\n By default the last 21 frames are printed. You can change the default to adjust for your needs. For example :\n ```python\n debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100)\n ```\n To validate that you have set up this debugging feature correctly, and you intend to use it in a training that may\n take hours to complete, first run it with normal tracing enabled for one of a few batches as explained in the next\n section.\n Mode 2. 
Specific batch absolute min/max tracing without detection\n The second work mode is per-batch tracing with the underflow/overflow detection feature turned off.\n Let's say you want to watch the absolute min and max values for all the ingredients of each `forward` call of a\n given batch, and only do that for batches 1 and 3. Then you instantiate this class as :\n ```python\n debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1,3])\n ```\n And now full batches 1 and 3 will be traced using the same format as explained above. Batches are 0-indexed.\n This is helpful if you know that the program starts misbehaving after a certain batch number, so you can\n fast-forward right to that area.\n Early stopping:\n You can also specify the batch number after which to stop the training, with :\n ```python\n debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1,3], abort_after_batch_num=3)\n ```\n This feature is mainly useful in the tracing mode, but you can use it for any mode.\n **Performance**:\n As this module measures absolute `min`/``max` of each weight of the model on every forward it'll slow the\n training down. Therefore remember to turn it off once the debugging needs have been met.\n Args:\n model (`nn.Module`):\n The model to debug.\n max_frames_to_save (`int`, *optional*, defaults to 21):\n How many frames back to record\n trace_batch_nums(`List[int]`, *optional*, defaults to `[]`):\n Which batch numbers to trace (turns detection off)\n abort_after_batch_num (`int``, *optional*):\n Whether to abort after a certain batch number has finished\n \"\"\"\n\n def __init__(self, model, max_frames_to_save=21, trace_batch_nums=[], abort_after_batch_num=None):\n self.model = model\n self.trace_batch_nums = trace_batch_nums\n self.abort_after_batch_num = abort_after_batch_num\n\n # keep a LIFO buffer of frames to dump as soon as inf/nan is encountered to give context to the problem emergence\n self.frames = collections.deque([], max_frames_to_save)\n self.frame = []\n self.batch_number = 0\n self.total_calls = 0\n self.detected_overflow = False\n self.prefix = \" \"\n\n self.analyse_model()\n\n self.register_forward_hook()\n\n def save_frame(self, frame=None):\n if frame is not None:\n self.expand_frame(frame)\n self.frames.append(\"\\n\".join(self.frame))\n self.frame = [] # start a new frame\n\n def expand_frame(self, line):\n self.frame.append(line)\n\n def trace_frames(self):\n print(\"\\n\".join(self.frames))\n self.frames = []\n\n def reset_saved_frames(self):\n self.frames = []\n\n def dump_saved_frames(self):\n print(f\"\\nDetected inf/nan during batch_number={self.batch_number} \"\n f\"Last {len(self.frames)} forward frames:\"\n f\"{'abs min':8} {'abs max':8} metadata\"\n f\"'\\n'.join(self.frames)\"\n f\"\\n\\n\")\n self.frames = []\n\n def analyse_model(self):\n # extract the fully qualified module names, to be able to report at run time. 
e.g.:\n # encoder.block.2.layer.0.SelfAttention.o\n #\n # for shared weights only the first shared module name will be registered\n self.module_names = {m: name for name, m in self.model.named_modules()}\n # self.longest_module_name = max(len(v) for v in self.module_names.values())\n\n def analyse_variable(self, var, ctx):\n if torch.is_tensor(var):\n self.expand_frame(self.get_abs_min_max(var, ctx))\n if self.detect_overflow(var, ctx):\n self.detected_overflow = True\n elif var is None:\n self.expand_frame(f\"{'None':>17} {ctx}\")\n else:\n self.expand_frame(f\"{'not a tensor':>17} {ctx}\")\n\n def batch_start_frame(self):\n self.expand_frame(f\"\\n\\n{self.prefix} *** Starting batch number={self.batch_number} ***\")\n self.expand_frame(f\"{'abs min':8} {'abs max':8} metadata\")\n\n def batch_end_frame(self):\n self.expand_frame(f\"{self.prefix} *** Finished batch number={self.batch_number - 1} ***\\n\\n\")\n\n def create_frame(self, module, input, output):\n self.expand_frame(f\"{self.prefix} {self.module_names[module]} {module.__class__.__name__}\")\n\n # params\n for name, p in module.named_parameters(recurse=False):\n self.analyse_variable(p, name)\n\n # inputs\n if isinstance(input, tuple):\n for i, x in enumerate(input):\n self.analyse_variable(x, f\"input[{i}]\")\n else:\n self.analyse_variable(input, \"input\")\n\n # outputs\n if isinstance(output, tuple):\n for i, x in enumerate(output):\n # possibly a tuple of tuples\n if isinstance(x, tuple):\n for j, y in enumerate(x):\n self.analyse_variable(y, f\"output[{i}][{j}]\")\n else:\n self.analyse_variable(x, f\"output[{i}]\")\n else:\n self.analyse_variable(output, \"output\")\n\n self.save_frame()\n\n def register_forward_hook(self):\n self.model.apply(self._register_forward_hook)\n\n def _register_forward_hook(self, module):\n module.register_forward_hook(self.forward_hook)\n\n def forward_hook(self, module, input, output):\n # - input is a tuple of packed inputs (could be non-Tensors)\n # - output could be a Tensor or a tuple of Tensors and non-Tensors\n\n last_frame_of_batch = False\n\n trace_mode = True if self.batch_number in self.trace_batch_nums else False\n if trace_mode:\n self.reset_saved_frames()\n\n if self.total_calls == 0:\n self.batch_start_frame()\n self.total_calls += 1\n\n # count batch numbers - the very first forward hook of the batch will be called when the\n # batch completes - i.e. it gets called very last - we know this batch has finished\n if module == self.model:\n self.batch_number += 1\n last_frame_of_batch = True\n\n self.create_frame(module, input, output)\n\n # if last_frame_of_batch:\n # self.batch_end_frame()\n\n if trace_mode:\n self.trace_frames()\n\n if last_frame_of_batch:\n self.batch_start_frame()\n\n if self.detected_overflow and not trace_mode:\n self.dump_saved_frames()\n\n # now we can abort, as it's pointless to continue running\n raise ValueError(\n \"DebugUnderflowOverflow: inf/nan detected, aborting as there is no point running further. 
\"\n \"Please scroll up above this traceback to see the activation values prior to this event.\"\n )\n\n # abort after certain batch if requested to do so\n if self.abort_after_batch_num is not None and self.batch_number > self.abort_after_batch_num:\n raise ValueError(\n f\"DebugUnderflowOverflow: aborting after {self.batch_number} batches due to `abort_after_batch_num={self.abort_after_batch_num}` arg\"\n )\n\n @staticmethod\n def get_abs_min_max(var, ctx):\n abs_var = var.abs()\n return f\"{abs_var.min():8.2e} {abs_var.max():8.2e} {ctx}\"\n\n @staticmethod\n def detect_overflow(var, ctx):\n \"\"\"\n Report whether the tensor contains any `nan` or `inf` entries.\n This is useful for detecting overflows/underflows and best to call right after the function that did some math that\n modified the tensor in question.\n This function contains a few other helper features that you can enable and tweak directly if you want to track\n various other things.\n Args:\n var: the tensor variable to check\n ctx: the message to print as a context\n Return:\n `True` if `inf` or `nan` was detected, `False` otherwise\n \"\"\"\n detected = False\n if torch.isnan(var).any().item():\n detected = True\n print(f\"{ctx} has nans\")\n if torch.isinf(var).any().item():\n detected = True\n print(f\"{ctx} has infs\")\n if var.dtype == torch.float32 and torch.ge(var.abs(), 65535).any().item():\n detected = True\n print(f\"{ctx} has overflow values {var.abs().max().item()}.\")\n # if needed to monitor large elements can enable the following\n if 0: # and detected:\n n100 = var[torch.ge(var.abs(), 100)]\n if n100.numel() > 0:\n print(f\"{ctx}: n100={n100.numel()}\")\n n1000 = var[torch.ge(var.abs(), 1000)]\n if n1000.numel() > 0:\n print(f\"{ctx}: n1000={n1000.numel()}\")\n n10000 = var[torch.ge(var.abs(), 10000)]\n if n10000.numel() > 0:\n print(f\"{ctx}: n10000={n10000.numel()}\")\n\n if 0:\n print(f\"min={var.min():9.2e} max={var.max():9.2e}\")\n\n if 0:\n print(f\"min={var.min():9.2e} max={var.max():9.2e} var={var.var():9.2e} mean={var.mean():9.2e} ({ctx})\")\n\n return detected" }, { "identifier": "build_optimizer", "path": "diffusion/utils/optimizer.py", "snippet": "def build_optimizer(model, optimizer_cfg):\n # default parameter-wise config\n logger = get_root_logger()\n\n if hasattr(model, 'module'):\n model = model.module\n # set optimizer constructor\n optimizer_cfg.setdefault('constructor', 'MyOptimizerConstructor')\n # parameter-wise setting: cancel weight decay for some specific modules\n custom_keys = dict()\n for name, module in model.named_modules():\n if hasattr(module, 'zero_weight_decay'):\n custom_keys.update({(name, key): dict(decay_mult=0) for key in module.zero_weight_decay})\n\n paramwise_cfg = Config(dict(cfg=dict(custom_keys=custom_keys)))\n given_cfg = optimizer_cfg.get('paramwise_cfg')\n if given_cfg:\n paramwise_cfg.merge_from_dict(dict(cfg=given_cfg))\n optimizer_cfg['paramwise_cfg'] = paramwise_cfg.cfg\n # build optimizer\n optimizer = mm_build_optimizer(model, optimizer_cfg)\n\n weight_decay_groups = dict()\n lr_groups = dict()\n for group in optimizer.param_groups:\n if not group.get('requires_grad', True): continue\n lr_groups.setdefault(group['lr'], []).append(group)\n weight_decay_groups.setdefault(group['weight_decay'], []).append(group)\n\n learnable_count, fix_count = 0, 0\n for p in model.parameters():\n if p.requires_grad:\n learnable_count += 1\n else:\n fix_count += 1\n fix_info = f\"{learnable_count} are learnable, {fix_count} are fix\"\n lr_info = \"Lr group: \" + \", 
\".join([f'{len(group)} params with lr {lr:.5f}' for lr, group in lr_groups.items()])\n wd_info = \"Weight decay group: \" + \", \".join(\n [f'{len(group)} params with weight decay {wd}' for wd, group in weight_decay_groups.items()])\n opt_info = f\"Optimizer: total {len(optimizer.param_groups)} param groups, {fix_info}. {lr_info}; {wd_info}.\"\n logger.info(opt_info)\n\n return optimizer" }, { "identifier": "auto_scale_lr", "path": "diffusion/utils/optimizer.py", "snippet": "def auto_scale_lr(effective_bs, optimizer_cfg, rule='linear', base_batch_size=256):\n assert rule in ['linear', 'sqrt']\n logger = get_root_logger()\n # scale by world size\n if rule == 'sqrt':\n scale_ratio = math.sqrt(effective_bs / base_batch_size)\n elif rule == 'linear':\n scale_ratio = effective_bs / base_batch_size\n optimizer_cfg['lr'] *= scale_ratio\n logger.info(f'Automatically adapt lr to {optimizer_cfg[\"lr\"]:.7f} (using {rule} scaling rule).')\n return scale_ratio" } ]
import argparse import datetime import os import sys import time import types import warnings import torch from pathlib import Path from accelerate import Accelerator, InitProcessGroupKwargs from accelerate.utils import DistributedType from mmcv.runner import LogBuffer from torch.utils.data import RandomSampler from diffusion import IDDPM from diffusion.data.builder import build_dataset, build_dataloader, set_data_root from diffusion.model.builder import build_model from diffusion.model.nets import PixArtMS, ControlPixArtHalf, ControlPixArtMSHalf from diffusion.utils.checkpoint import save_checkpoint, load_checkpoint from diffusion.utils.data_sampler import AspectRatioBatchSampler, BalancedAspectRatioBatchSampler from diffusion.utils.dist_utils import synchronize, get_world_size, clip_grad_norm_ from diffusion.utils.logger import get_root_logger from diffusion.utils.lr_scheduler import build_lr_scheduler from diffusion.utils.misc import set_random_seed, read_config, init_random_seed, DebugUnderflowOverflow from diffusion.utils.optimizer import build_optimizer, auto_scale_lr from accelerate import FullyShardedDataParallelPlugin from torch.distributed.fsdp.fully_sharded_data_parallel import FullStateDictConfig
14,165
current_file_path = Path(__file__).resolve() sys.path.insert(0, str(current_file_path.parent.parent)) warnings.filterwarnings("ignore") # ignore warning def set_fsdp_env(): os.environ["ACCELERATE_USE_FSDP"] = 'true' os.environ["FSDP_AUTO_WRAP_POLICY"] = 'TRANSFORMER_BASED_WRAP' os.environ["FSDP_BACKWARD_PREFETCH"] = 'BACKWARD_PRE' os.environ["FSDP_TRANSFORMER_CLS_TO_WRAP"] = 'PixArtBlock' def train(): if config.get('debug_nan', False): DebugUnderflowOverflow(model) logger.info('NaN debugger registered. Start to detect overflow during training.') time_start, last_tic = time.time(), time.time() log_buffer = LogBuffer() start_step = start_epoch * len(train_dataloader) global_step = 0 total_steps = len(train_dataloader) * config.num_epochs load_vae_feat = getattr(train_dataloader.dataset, 'load_vae_feat', False) if not load_vae_feat: raise ValueError("Only support load vae features for now.") # Now you train the model for epoch in range(start_epoch + 1, config.num_epochs + 1): data_time_start = time.time() data_time_all = 0 for step, batch in enumerate(train_dataloader): data_time_all += time.time() - data_time_start z = batch[0] # 4 x 4 x 128 x 128 z:vae output, 3x1024x1024->vae->4x128x128 clean_images = z * config.scale_factor # vae needed scale factor y = batch[1] # 4 x 1 x 120 x 4096 # T5 extracted feature of caption, 120 token, 4096 y_mask = batch[2] # 4 x 1 x 1 x 120 # caption indicate whether valid data_info = batch[3] # Sample a random timestep for each image bs = clean_images.shape[0] timesteps = torch.randint(0, config.train_sampling_steps, (bs,), device=clean_images.device).long() grad_norm = None with accelerator.accumulate(model): # Predict the noise residual optimizer.zero_grad() loss_term = train_diffusion.training_losses(model, clean_images, timesteps, model_kwargs=dict(y=y, mask=y_mask, data_info=data_info, c=data_info['condition'] * config.scale_factor)) loss = loss_term['loss'].mean() accelerator.backward(loss) if accelerator.sync_gradients:
current_file_path = Path(__file__).resolve() sys.path.insert(0, str(current_file_path.parent.parent)) warnings.filterwarnings("ignore") # ignore warning def set_fsdp_env(): os.environ["ACCELERATE_USE_FSDP"] = 'true' os.environ["FSDP_AUTO_WRAP_POLICY"] = 'TRANSFORMER_BASED_WRAP' os.environ["FSDP_BACKWARD_PREFETCH"] = 'BACKWARD_PRE' os.environ["FSDP_TRANSFORMER_CLS_TO_WRAP"] = 'PixArtBlock' def train(): if config.get('debug_nan', False): DebugUnderflowOverflow(model) logger.info('NaN debugger registered. Start to detect overflow during training.') time_start, last_tic = time.time(), time.time() log_buffer = LogBuffer() start_step = start_epoch * len(train_dataloader) global_step = 0 total_steps = len(train_dataloader) * config.num_epochs load_vae_feat = getattr(train_dataloader.dataset, 'load_vae_feat', False) if not load_vae_feat: raise ValueError("Only support load vae features for now.") # Now you train the model for epoch in range(start_epoch + 1, config.num_epochs + 1): data_time_start = time.time() data_time_all = 0 for step, batch in enumerate(train_dataloader): data_time_all += time.time() - data_time_start z = batch[0] # 4 x 4 x 128 x 128 z:vae output, 3x1024x1024->vae->4x128x128 clean_images = z * config.scale_factor # vae needed scale factor y = batch[1] # 4 x 1 x 120 x 4096 # T5 extracted feature of caption, 120 token, 4096 y_mask = batch[2] # 4 x 1 x 1 x 120 # caption indicate whether valid data_info = batch[3] # Sample a random timestep for each image bs = clean_images.shape[0] timesteps = torch.randint(0, config.train_sampling_steps, (bs,), device=clean_images.device).long() grad_norm = None with accelerator.accumulate(model): # Predict the noise residual optimizer.zero_grad() loss_term = train_diffusion.training_losses(model, clean_images, timesteps, model_kwargs=dict(y=y, mask=y_mask, data_info=data_info, c=data_info['condition'] * config.scale_factor)) loss = loss_term['loss'].mean() accelerator.backward(loss) if accelerator.sync_gradients:
grad_norm = accelerator.clip_grad_norm_(model.parameters(), config.gradient_clip)
14
2023-10-12 14:16:33+00:00
16k
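The training loop in the PixArt ControlNet record above relies on accelerate for gradient accumulation and clipping (accelerator.accumulate, accelerator.backward, and, as the gold next line, accelerator.clip_grad_norm_ once gradients are synced). The sketch below approximates that step in plain PyTorch with a toy model, synthetic batches, and an ordinary MSE loss in place of the diffusion training loss; all names and hyperparameter values are illustrative, not taken from the record's config.

# Plain-PyTorch approximation of the accumulate -> backward -> clip -> step pattern above.
# The model, data, and loss are toy stand-ins.
import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(16, 64), nn.SiLU(), nn.Linear(64, 16))
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
accumulation_steps = 4   # analogous to a gradient_accumulation_steps setting
gradient_clip = 0.01     # analogous to a config.gradient_clip setting

optimizer.zero_grad()
for step in range(16):
    x = torch.randn(8, 16)
    target = torch.randn(8, 16)
    loss = nn.functional.mse_loss(model(x), target) / accumulation_steps
    loss.backward()                              # gradients accumulate across micro-batches

    if (step + 1) % accumulation_steps == 0:     # "sync point": clip, then step
        grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), gradient_clip)
        optimizer.step()
        optimizer.zero_grad()
        print(f"step {step + 1}: grad norm before clipping = {float(grad_norm):.4f}")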
showlab/MotionDirector
MotionDirector_train.py
[ { "identifier": "UNet3DConditionModel", "path": "models/unet_3d_condition.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n r\"\"\"\n UNet3DConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a timestep\n and returns sample shaped output.\n\n This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library\n implements for all the models (such as downloading or saving, etc.)\n\n Parameters:\n sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):\n Height and width of input/output sample.\n in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample.\n out_channels (`int`, *optional*, defaults to 4): The number of channels in the output.\n down_block_types (`Tuple[str]`, *optional*, defaults to `(\"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"DownBlock2D\")`):\n The tuple of downsample blocks to use.\n up_block_types (`Tuple[str]`, *optional*, defaults to `(\"UpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\",)`):\n The tuple of upsample blocks to use.\n block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):\n The tuple of output channels for each block.\n layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.\n downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.\n mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.\n act_fn (`str`, *optional*, defaults to `\"silu\"`): The activation function to use.\n norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.\n If `None`, it will skip the normalization and activation layers in post-processing\n norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.\n cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features.\n attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.\n \"\"\"\n\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n up_block_types: Tuple[str] = (\"UpBlock3D\", \"CrossAttnUpBlock3D\", \"CrossAttnUpBlock3D\", \"CrossAttnUpBlock3D\"),\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: Optional[int] = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1024,\n attention_head_dim: Union[int, Tuple[int]] = 64,\n ):\n super().__init__()\n\n self.sample_size = sample_size\n self.gradient_checkpointing = False\n # Check inputs\n if len(down_block_types) != len(up_block_types):\n raise ValueError(\n f\"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}.\"\n )\n\n if len(block_out_channels) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. 
`down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}.\"\n )\n\n # input\n conv_in_kernel = 3\n conv_out_kernel = 3\n conv_in_padding = (conv_in_kernel - 1) // 2\n self.conv_in = nn.Conv2d(\n in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n\n # time\n time_embed_dim = block_out_channels[0] * 4\n self.time_proj = Timesteps(block_out_channels[0], True, 0)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(\n timestep_input_dim,\n time_embed_dim,\n act_fn=act_fn,\n )\n\n self.transformer_in = TransformerTemporalModel(\n num_attention_heads=8,\n attention_head_dim=attention_head_dim,\n in_channels=block_out_channels[0],\n num_layers=1,\n )\n\n # class embedding\n self.down_blocks = nn.ModuleList([])\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n dual_cross_attention=False,\n )\n self.down_blocks.append(down_block)\n\n # mid\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=False,\n )\n\n # count how many layers upsample the images\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=False,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = 
output_channel\n\n # out\n if norm_num_groups is not None:\n self.conv_norm_out = nn.GroupNorm(\n num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps\n )\n self.conv_act = nn.SiLU()\n else:\n self.conv_norm_out = None\n self.conv_act = None\n\n conv_out_padding = (conv_out_kernel - 1) // 2\n self.conv_out = nn.Conv2d(\n block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding\n )\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, value=False):\n self.gradient_checkpointing = value\n self.mid_block.gradient_checkpointing = value\n for module in self.down_blocks + self.up_blocks:\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n timestep_cond: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, num_frames, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet3DConditionOutput`] instead of a plain tuple.\n cross_attention_kwargs (`dict`, *optional*):\n A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under\n `self.processor` in\n [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).\n\n Returns:\n [`~models.unet_2d_condition.UNet3DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet3DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n num_frames = sample.shape[2]\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n\n emb = self.time_embedding(t_emb, timestep_cond)\n emb = emb.repeat_interleave(repeats=num_frames, dim=0)\n encoder_hidden_states = encoder_hidden_states.repeat_interleave(repeats=num_frames, dim=0)\n\n # 2. pre-process\n sample = sample.permute(0, 2, 1, 3, 4).reshape((sample.shape[0] * num_frames, -1) + sample.shape[3:])\n sample = self.conv_in(sample)\n \n if num_frames > 1:\n if self.gradient_checkpointing:\n sample = transformer_g_c(self.transformer_in, sample, num_frames)\n else:\n sample = self.transformer_in(sample, num_frames=num_frames).sample\n\n # 3. down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, num_frames=num_frames)\n\n down_block_res_samples += res_samples\n\n if down_block_additional_residuals is not None:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\n new_down_block_res_samples += (down_block_res_sample,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # 4. 
mid\n if self.mid_block is not None:\n sample = self.mid_block(\n sample,\n emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n\n if mid_block_additional_residual is not None:\n sample = sample + mid_block_additional_residual\n\n # 5. up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n upsample_size=upsample_size,\n num_frames=num_frames,\n )\n\n # 6. post-process\n if self.conv_norm_out:\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n\n sample = self.conv_out(sample)\n\n # reshape to (batch, channel, framerate, width, height)\n sample = sample[None, :].reshape((-1, num_frames) + sample.shape[1:]).permute(0, 2, 1, 3, 4)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)" }, { "identifier": "VideoJsonDataset", "path": "utils/dataset.py", "snippet": "class VideoJsonDataset(Dataset):\n def __init__(\n self,\n tokenizer = None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 4,\n sample_start_idx: int = 1,\n frame_step: int = 1,\n json_path: str =\"\",\n json_data = None,\n vid_data_key: str = \"video_path\",\n preprocessed: bool = False,\n use_bucketing: bool = False,\n **kwargs\n ):\n self.vid_types = (\".mp4\", \".avi\", \".mov\", \".webm\", \".flv\", \".mjpeg\")\n self.use_bucketing = use_bucketing\n self.tokenizer = tokenizer\n self.preprocessed = preprocessed\n \n self.vid_data_key = vid_data_key\n self.train_data = self.load_from_json(json_path, json_data)\n\n self.width = width\n self.height = height\n\n self.n_sample_frames = n_sample_frames\n self.sample_start_idx = sample_start_idx\n self.frame_step = frame_step\n\n def build_json(self, json_data):\n extended_data = []\n for data in json_data['data']:\n for nested_data in data['data']:\n self.build_json_dict(\n data, \n nested_data, \n extended_data\n )\n json_data = extended_data\n return json_data\n\n def build_json_dict(self, data, nested_data, extended_data):\n clip_path = nested_data['clip_path'] if 'clip_path' in nested_data else None\n \n extended_data.append({\n self.vid_data_key: data[self.vid_data_key],\n 'frame_index': nested_data['frame_index'],\n 'prompt': nested_data['prompt'],\n 'clip_path': clip_path\n })\n \n def load_from_json(self, path, json_data):\n try:\n with open(path) as jpath:\n print(f\"Loading JSON from {path}\")\n json_data = json.load(jpath)\n\n return self.build_json(json_data)\n\n except:\n self.train_data = []\n print(\"Non-existant JSON path. 
Skipping.\")\n \n def validate_json(self, base_path, path):\n return os.path.exists(f\"{base_path}/{path}\")\n\n def get_frame_range(self, vr):\n return get_video_frames(\n vr, \n self.sample_start_idx, \n self.frame_step, \n self.n_sample_frames\n )\n \n def get_vid_idx(self, vr, vid_data=None):\n frames = self.n_sample_frames\n\n if vid_data is not None:\n idx = vid_data['frame_index']\n else:\n idx = self.sample_start_idx\n\n return idx\n\n def get_frame_buckets(self, vr):\n _, h, w = vr[0].shape \n width, height = sensible_buckets(self.width, self.height, h, w)\n # width, height = self.width, self.height\n resize = T.transforms.Resize((height, width), antialias=True)\n\n return resize\n\n def get_frame_batch(self, vr, resize=None):\n frame_range = self.get_frame_range(vr)\n frames = vr.get_batch(frame_range)\n video = rearrange(frames, \"f h w c -> f c h w\")\n\n if resize is not None: video = resize(video)\n return video\n\n def process_video_wrapper(self, vid_path):\n video, vr = process_video(\n vid_path,\n self.use_bucketing,\n self.width, \n self.height, \n self.get_frame_buckets, \n self.get_frame_batch\n )\n \n return video, vr \n\n def train_data_batch(self, index):\n\n # If we are training on individual clips.\n if 'clip_path' in self.train_data[index] and \\\n self.train_data[index]['clip_path'] is not None:\n\n vid_data = self.train_data[index]\n\n clip_path = vid_data['clip_path']\n \n # Get video prompt\n prompt = vid_data['prompt']\n\n video, _ = self.process_video_wrapper(clip_path)\n\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n return video, prompt, prompt_ids\n\n # Assign train data\n train_data = self.train_data[index]\n \n # Get the frame of the current index.\n self.sample_start_idx = train_data['frame_index']\n \n # Initialize resize\n resize = None\n\n video, vr = self.process_video_wrapper(train_data[self.vid_data_key])\n\n # Get video prompt\n prompt = train_data['prompt']\n vr.seek(0)\n\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n return video, prompt, prompt_ids\n\n @staticmethod\n def __getname__(): return 'json'\n\n def __len__(self):\n if self.train_data is not None:\n return len(self.train_data)\n else: \n return 0\n\n def __getitem__(self, index):\n \n # Initialize variables\n video = None\n prompt = None\n prompt_ids = None\n\n # Use default JSON training\n if self.train_data is not None:\n video, prompt, prompt_ids = self.train_data_batch(index)\n\n example = {\n \"pixel_values\": (video / 127.5 - 1.0),\n \"prompt_ids\": prompt_ids[0],\n \"text_prompt\": prompt,\n 'dataset': self.__getname__()\n }\n\n return example" }, { "identifier": "SingleVideoDataset", "path": "utils/dataset.py", "snippet": "class SingleVideoDataset(Dataset):\n def __init__(\n self,\n tokenizer = None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 4,\n frame_step: int = 1,\n single_video_path: str = \"\",\n single_video_prompt: str = \"\",\n use_caption: bool = False,\n use_bucketing: bool = False,\n **kwargs\n ):\n self.tokenizer = tokenizer\n self.use_bucketing = use_bucketing\n self.frames = []\n self.index = 1\n\n self.vid_types = (\".mp4\", \".avi\", \".mov\", \".webm\", \".flv\", \".mjpeg\")\n self.n_sample_frames = n_sample_frames\n self.frame_step = frame_step\n\n self.single_video_path = single_video_path\n self.single_video_prompt = single_video_prompt\n\n self.width = width\n self.height = height\n def create_video_chunks(self):\n vr = decord.VideoReader(self.single_video_path)\n vr_range = range(0, len(vr), 
self.frame_step)\n\n self.frames = list(self.chunk(vr_range, self.n_sample_frames))\n return self.frames\n\n def chunk(self, it, size):\n it = iter(it)\n return iter(lambda: tuple(islice(it, size)), ())\n\n def get_frame_batch(self, vr, resize=None):\n index = self.index\n frames = vr.get_batch(self.frames[self.index])\n video = rearrange(frames, \"f h w c -> f c h w\")\n\n if resize is not None: video = resize(video)\n return video\n\n def get_frame_buckets(self, vr):\n h, w, c = vr[0].shape\n width, height = sensible_buckets(self.width, self.height, w, h)\n resize = T.transforms.Resize((height, width), antialias=True)\n\n return resize\n \n def process_video_wrapper(self, vid_path):\n video, vr = process_video(\n vid_path,\n self.use_bucketing,\n self.width, \n self.height, \n self.get_frame_buckets, \n self.get_frame_batch\n )\n \n return video, vr \n\n def single_video_batch(self, index):\n train_data = self.single_video_path\n self.index = index\n\n if train_data.endswith(self.vid_types):\n video, _ = self.process_video_wrapper(train_data)\n\n prompt = self.single_video_prompt\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n return video, prompt, prompt_ids\n else:\n raise ValueError(f\"Single video is not a video type. Types: {self.vid_types}\")\n \n @staticmethod\n def __getname__(): return 'single_video'\n\n def __len__(self):\n \n return len(self.create_video_chunks())\n\n def __getitem__(self, index):\n\n video, prompt, prompt_ids = self.single_video_batch(index)\n\n example = {\n \"pixel_values\": (video / 127.5 - 1.0),\n \"prompt_ids\": prompt_ids[0],\n \"text_prompt\": prompt,\n 'dataset': self.__getname__()\n }\n\n return example" }, { "identifier": "ImageDataset", "path": "utils/dataset.py", "snippet": "class ImageDataset(Dataset):\n \n def __init__(\n self,\n tokenizer = None,\n width: int = 256,\n height: int = 256,\n base_width: int = 256,\n base_height: int = 256,\n use_caption: bool = False,\n image_dir: str = '',\n single_img_prompt: str = '',\n use_bucketing: bool = False,\n fallback_prompt: str = '',\n **kwargs\n ):\n self.tokenizer = tokenizer\n self.img_types = (\".png\", \".jpg\", \".jpeg\", '.bmp')\n self.use_bucketing = use_bucketing\n\n self.image_dir = self.get_images_list(image_dir)\n self.fallback_prompt = fallback_prompt\n\n self.use_caption = use_caption\n self.single_img_prompt = single_img_prompt\n\n self.width = width\n self.height = height\n\n def get_images_list(self, image_dir):\n if os.path.exists(image_dir):\n imgs = [x for x in os.listdir(image_dir) if x.endswith(self.img_types)]\n full_img_dir = []\n\n for img in imgs: \n full_img_dir.append(f\"{image_dir}/{img}\")\n\n return sorted(full_img_dir)\n\n return ['']\n\n def image_batch(self, index):\n train_data = self.image_dir[index]\n img = train_data\n\n try:\n img = torchvision.io.read_image(img, mode=torchvision.io.ImageReadMode.RGB)\n except:\n img = T.transforms.PILToTensor()(Image.open(img).convert(\"RGB\"))\n\n width = self.width\n height = self.height\n\n if self.use_bucketing:\n _, h, w = img.shape\n width, height = sensible_buckets(width, height, w, h)\n \n resize = T.transforms.Resize((height, width), antialias=True)\n\n img = resize(img) \n img = repeat(img, 'c h w -> f c h w', f=16)\n\n prompt = get_text_prompt(\n file_path=train_data,\n text_prompt=self.single_img_prompt,\n fallback_prompt=self.fallback_prompt,\n ext_types=self.img_types, \n use_caption=True\n )\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n return img, prompt, prompt_ids\n\n @staticmethod\n def 
__getname__(): return 'image'\n \n def __len__(self):\n # Image directory\n if os.path.exists(self.image_dir[0]):\n return len(self.image_dir)\n else:\n return 0\n\n def __getitem__(self, index):\n img, prompt, prompt_ids = self.image_batch(index)\n example = {\n \"pixel_values\": (img / 127.5 - 1.0),\n \"prompt_ids\": prompt_ids[0],\n \"text_prompt\": prompt, \n 'dataset': self.__getname__()\n }\n\n return example" }, { "identifier": "VideoFolderDataset", "path": "utils/dataset.py", "snippet": "class VideoFolderDataset(Dataset):\n def __init__(\n self,\n tokenizer=None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 16,\n fps: int = 8,\n path: str = \"./data\",\n fallback_prompt: str = \"\",\n use_bucketing: bool = False,\n **kwargs\n ):\n self.tokenizer = tokenizer\n self.use_bucketing = use_bucketing\n\n self.fallback_prompt = fallback_prompt\n\n self.video_files = glob(f\"{path}/*.mp4\")\n\n self.width = width\n self.height = height\n\n self.n_sample_frames = n_sample_frames\n self.fps = fps\n\n def get_frame_buckets(self, vr):\n h, w, c = vr[0].shape\n width, height = sensible_buckets(self.width, self.height, w, h)\n resize = T.transforms.Resize((height, width), antialias=True)\n\n return resize\n\n def get_frame_batch(self, vr, resize=None):\n n_sample_frames = self.n_sample_frames\n native_fps = vr.get_avg_fps()\n \n every_nth_frame = max(1, round(native_fps / self.fps))\n every_nth_frame = min(len(vr), every_nth_frame)\n \n effective_length = len(vr) // every_nth_frame\n if effective_length < n_sample_frames:\n n_sample_frames = effective_length\n\n effective_idx = random.randint(0, (effective_length - n_sample_frames))\n idxs = every_nth_frame * np.arange(effective_idx, effective_idx + n_sample_frames)\n\n video = vr.get_batch(idxs)\n video = rearrange(video, \"f h w c -> f c h w\")\n\n if resize is not None: video = resize(video)\n return video, vr\n \n def process_video_wrapper(self, vid_path):\n video, vr = process_video(\n vid_path,\n self.use_bucketing,\n self.width, \n self.height, \n self.get_frame_buckets, \n self.get_frame_batch\n )\n return video, vr\n \n def get_prompt_ids(self, prompt):\n return self.tokenizer(\n prompt,\n truncation=True,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n return_tensors=\"pt\",\n ).input_ids\n\n @staticmethod\n def __getname__(): return 'folder'\n\n def __len__(self):\n return len(self.video_files)\n\n def __getitem__(self, index):\n\n video, _ = self.process_video_wrapper(self.video_files[index])\n\n prompt = self.fallback_prompt\n\n prompt_ids = self.get_prompt_ids(prompt)\n\n return {\"pixel_values\": (video[0] / 127.5 - 1.0), \"prompt_ids\": prompt_ids[0], \"text_prompt\": prompt, 'dataset': self.__getname__()}" }, { "identifier": "CachedDataset", "path": "utils/dataset.py", "snippet": "class CachedDataset(Dataset):\n def __init__(self,cache_dir: str = ''):\n self.cache_dir = cache_dir\n self.cached_data_list = self.get_files_list()\n\n def get_files_list(self):\n tensors_list = [f\"{self.cache_dir}/{x}\" for x in os.listdir(self.cache_dir) if x.endswith('.pt')]\n return sorted(tensors_list)\n\n def __len__(self):\n return len(self.cached_data_list)\n\n def __getitem__(self, index):\n cached_latent = torch.load(self.cached_data_list[index], map_location='cuda:0')\n return cached_latent" }, { "identifier": "LoraHandler", "path": "utils/lora_handler.py", "snippet": "class LoraHandler(object):\n def __init__(\n self, \n version: LORA_VERSIONS = LoraVersions.cloneofsimo, \n use_unet_lora: bool 
= False,\n use_text_lora: bool = False,\n save_for_webui: bool = False,\n only_for_webui: bool = False,\n lora_bias: str = 'none',\n unet_replace_modules: list = None,\n text_encoder_replace_modules: list = None\n ):\n self.version = version\n self.lora_loader = self.get_lora_func(func_type=LoraFuncTypes.loader)\n self.lora_injector = self.get_lora_func(func_type=LoraFuncTypes.injector)\n self.lora_bias = lora_bias\n self.use_unet_lora = use_unet_lora\n self.use_text_lora = use_text_lora\n self.save_for_webui = save_for_webui\n self.only_for_webui = only_for_webui\n self.unet_replace_modules = unet_replace_modules\n self.text_encoder_replace_modules = text_encoder_replace_modules\n self.use_lora = any([use_text_lora, use_unet_lora])\n\n def is_cloneofsimo_lora(self):\n return self.version == LoraVersions.cloneofsimo\n\n\n def get_lora_func(self, func_type: LORA_FUNC_TYPES = LoraFuncTypes.loader):\n\n if self.is_cloneofsimo_lora():\n\n if func_type == LoraFuncTypes.loader:\n return monkeypatch_or_replace_lora_extended\n\n if func_type == LoraFuncTypes.injector:\n return inject_trainable_lora_extended\n \n assert \"LoRA Version does not exist.\"\n\n def check_lora_ext(self, lora_file: str):\n return lora_file.endswith(tuple(LORA_FILE_TYPES))\n\n def get_lora_file_path(\n self, \n lora_path: str, \n model: Union[UNet3DConditionModel, CLIPTextModel]\n ):\n if os.path.exists(lora_path):\n lora_filenames = [fns for fns in os.listdir(lora_path)]\n is_lora = self.check_lora_ext(lora_path)\n\n is_unet = isinstance(model, UNet3DConditionModel)\n is_text = isinstance(model, CLIPTextModel)\n idx = 0 if is_unet else 1\n\n base_name = FILE_BASENAMES[idx]\n \n for lora_filename in lora_filenames:\n is_lora = self.check_lora_ext(lora_filename)\n if not is_lora:\n continue\n \n if base_name in lora_filename:\n return os.path.join(lora_path, lora_filename)\n\n return None\n\n def handle_lora_load(self, file_name:str, lora_loader_args: dict = None):\n self.lora_loader(**lora_loader_args)\n print(f\"Successfully loaded LoRA from: {file_name}\")\n \n def load_lora(self, model, lora_path: str = '', lora_loader_args: dict = None,):\n try:\n lora_file = self.get_lora_file_path(lora_path, model)\n\n if lora_file is not None:\n lora_loader_args.update({\"lora_path\": lora_file})\n self.handle_lora_load(lora_file, lora_loader_args)\n\n else:\n print(f\"Could not load LoRAs for {model.__class__.__name__}. 
Injecting new ones instead...\")\n\n except Exception as e:\n print(f\"An error occurred while loading a LoRA file: {e}\")\n \n def get_lora_func_args(self, lora_path, use_lora, model, replace_modules, r, dropout, lora_bias, scale):\n return_dict = lora_args.copy()\n \n if self.is_cloneofsimo_lora():\n return_dict = filter_dict(return_dict, keys=CLONE_OF_SIMO_KEYS)\n return_dict.update({\n \"model\": model,\n \"loras\": self.get_lora_file_path(lora_path, model),\n \"target_replace_module\": replace_modules,\n \"r\": r,\n \"scale\": scale,\n \"dropout_p\": dropout,\n })\n\n return return_dict\n\n def do_lora_injection(\n self, \n model, \n replace_modules, \n bias='none',\n dropout=0,\n r=4,\n lora_loader_args=None,\n ): \n REPLACE_MODULES = replace_modules\n\n params = None\n negation = None\n is_injection_hybrid = False\n \n if self.is_cloneofsimo_lora():\n is_injection_hybrid = True\n injector_args = lora_loader_args\n\n params, negation = self.lora_injector(**injector_args) # inject_trainable_lora_extended\n for _up, _down in extract_lora_ups_down(\n model, \n target_replace_module=REPLACE_MODULES):\n\n if all(x is not None for x in [_up, _down]):\n print(f\"Lora successfully injected into {model.__class__.__name__}.\")\n\n break\n\n return params, negation, is_injection_hybrid\n\n return params, negation, is_injection_hybrid\n\n def add_lora_to_model(self, use_lora, model, replace_modules, dropout=0.0, lora_path='', r=16, scale=1.0):\n\n params = None\n negation = None\n\n lora_loader_args = self.get_lora_func_args(\n lora_path,\n use_lora,\n model,\n replace_modules,\n r,\n dropout,\n self.lora_bias,\n scale\n )\n\n if use_lora:\n params, negation, is_injection_hybrid = self.do_lora_injection(\n model, \n replace_modules, \n bias=self.lora_bias,\n lora_loader_args=lora_loader_args,\n dropout=dropout,\n r=r\n )\n\n if not is_injection_hybrid:\n self.load_lora(model, lora_path=lora_path, lora_loader_args=lora_loader_args)\n \n params = model if params is None else params\n return params, negation\n\n def save_cloneofsimo_lora(self, model, save_path, step, flag):\n \n def save_lora(model, name, condition, replace_modules, step, save_path, flag=None):\n if condition and replace_modules is not None:\n save_path = f\"{save_path}/{step}_{name}.pt\"\n save_lora_weight(model, save_path, replace_modules, flag)\n\n save_lora(\n model.unet, \n FILE_BASENAMES[0], \n self.use_unet_lora, \n self.unet_replace_modules, \n step,\n save_path,\n flag\n )\n save_lora(\n model.text_encoder, \n FILE_BASENAMES[1], \n self.use_text_lora, \n self.text_encoder_replace_modules, \n step, \n save_path,\n flag\n )\n\n # train_patch_pipe(model, self.use_unet_lora, self.use_text_lora)\n\n def save_lora_weights(self, model: None, save_path: str ='',step: str = '', flag=None):\n save_path = f\"{save_path}/lora\"\n os.makedirs(save_path, exist_ok=True)\n\n if self.is_cloneofsimo_lora():\n if any([self.save_for_webui, self.only_for_webui]):\n warnings.warn(\n \"\"\"\n You have 'save_for_webui' enabled, but are using cloneofsimo's LoRA implemention.\n Only 'stable_lora' is supported for saving to a compatible webui file.\n \"\"\"\n )\n self.save_cloneofsimo_lora(model, save_path, step, flag)" }, { "identifier": "extract_lora_child_module", "path": "utils/lora.py", "snippet": "def extract_lora_child_module(model, target_replace_module=DEFAULT_TARGET_REPLACE):\n\n loras = []\n\n for target_replace_module_i in target_replace_module:\n\n for _m, _n, _child_module in _find_modules(\n model,\n [target_replace_module_i],\n 
search_class=[LoraInjectedLinear, LoraInjectedConv2d, LoraInjectedConv3d],\n ):\n loras.append(_child_module)\n\n return loras" }, { "identifier": "ddim_inversion", "path": "utils/ddim_utils.py", "snippet": "@torch.no_grad()\ndef ddim_inversion(pipeline, ddim_scheduler, video_latent, num_inv_steps, prompt=\"\"):\n ddim_latents = ddim_loop(pipeline, ddim_scheduler, video_latent, num_inv_steps, prompt)\n return ddim_latents" } ]
import argparse import datetime import logging import inspect import math import os import random import gc import copy import torch import torch.nn.functional as F import torch.utils.checkpoint import diffusers import transformers import imageio import numpy as np import itertools import bitsandbytes as bnb from typing import Dict, Optional, Tuple from omegaconf import OmegaConf from torchvision import transforms from tqdm.auto import tqdm from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from models.unet_3d_condition import UNet3DConditionModel from diffusers.models import AutoencoderKL from diffusers import DDIMScheduler, TextToVideoSDPipeline from diffusers.optimization import get_scheduler from diffusers.utils.import_utils import is_xformers_available from diffusers.models.attention_processor import AttnProcessor2_0, Attention from diffusers.models.attention import BasicTransformerBlock from transformers import CLIPTextModel, CLIPTokenizer from transformers.models.clip.modeling_clip import CLIPEncoder from utils.dataset import VideoJsonDataset, SingleVideoDataset, \ ImageDataset, VideoFolderDataset, CachedDataset from einops import rearrange, repeat from utils.lora_handler import LoraHandler from utils.lora import extract_lora_child_module from utils.ddim_utils import ddim_inversion from xformers.ops import MemoryEfficientAttentionFlashAttentionOp
13405
if is_lora and condition and isinstance(model, list): params = create_optim_params( params=itertools.chain(*model), extra_params=extra_params ) optimizer_params.append(params) continue if is_lora and condition and not isinstance(model, list): for n, p in model.named_parameters(): if 'lora' in n: params = create_optim_params(n, p, lr, extra_params) optimizer_params.append(params) continue # If this is true, we can train it. if condition: for n, p in model.named_parameters(): should_negate = 'lora' in n and not is_lora if should_negate: continue params = create_optim_params(n, p, lr, extra_params) optimizer_params.append(params) return optimizer_params def get_optimizer(use_8bit_adam): if use_8bit_adam: try: except ImportError: raise ImportError( "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`" ) return bnb.optim.AdamW8bit else: return torch.optim.AdamW def is_mixed_precision(accelerator): weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 return weight_dtype def cast_to_gpu_and_type(model_list, accelerator, weight_dtype): for model in model_list: if model is not None: model.to(accelerator.device, dtype=weight_dtype) def inverse_video(pipe, latents, num_steps): ddim_inv_scheduler = DDIMScheduler.from_config(pipe.scheduler.config) ddim_inv_scheduler.set_timesteps(num_steps) ddim_inv_latent = ddim_inversion( pipe, ddim_inv_scheduler, video_latent=latents.to(pipe.device), num_inv_steps=num_steps, prompt="")[-1] return ddim_inv_latent def handle_cache_latents( should_cache, output_dir, train_dataloader, train_batch_size, vae, unet, pretrained_model_path, noise_prior, cached_latent_dir=None, ): # Cache latents by storing them in VRAM. # Speeds up training and saves memory by not encoding during the train loop. if not should_cache: return None vae.to('cuda', dtype=torch.float16) vae.enable_slicing() pipe = TextToVideoSDPipeline.from_pretrained( pretrained_model_path, vae=vae, unet=copy.deepcopy(unet).to('cuda', dtype=torch.float16) ) pipe.text_encoder.to('cuda', dtype=torch.float16) cached_latent_dir = ( os.path.abspath(cached_latent_dir) if cached_latent_dir is not None else None ) if cached_latent_dir is None: cache_save_dir = f"{output_dir}/cached_latents" os.makedirs(cache_save_dir, exist_ok=True) for i, batch in enumerate(tqdm(train_dataloader, desc="Caching Latents.")): save_name = f"cached_{i}" full_out_path = f"{cache_save_dir}/{save_name}.pt" pixel_values = batch['pixel_values'].to('cuda', dtype=torch.float16) batch['latents'] = tensor_to_vae_latent(pixel_values, vae) if noise_prior > 0.: batch['inversion_noise'] = inverse_video(pipe, batch['latents'], 50) for k, v in batch.items(): batch[k] = v[0] torch.save(batch, full_out_path) del pixel_values del batch # We do this to avoid fragmentation from casting latents between devices. torch.cuda.empty_cache() else: cache_save_dir = cached_latent_dir return torch.utils.data.DataLoader(
already_printed_trainables = False logger = get_logger(__name__, log_level="INFO") def create_logging(logging, logger, accelerator): logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) def accelerate_set_verbose(accelerator): if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() def get_train_dataset(dataset_types, train_data, tokenizer): train_datasets = [] # Loop through all available datasets, get the name, then add to list of data to process. for DataSet in [VideoJsonDataset, SingleVideoDataset, ImageDataset, VideoFolderDataset]: for dataset in dataset_types: if dataset == DataSet.__getname__(): train_datasets.append(DataSet(**train_data, tokenizer=tokenizer)) if len(train_datasets) > 0: return train_datasets else: raise ValueError("Dataset type not found: 'json', 'single_video', 'folder', 'image'") def extend_datasets(datasets, dataset_items, extend=False): biggest_data_len = max(x.__len__() for x in datasets) extended = [] for dataset in datasets: if dataset.__len__() == 0: del dataset continue if dataset.__len__() < biggest_data_len: for item in dataset_items: if extend and item not in extended and hasattr(dataset, item): print(f"Extending {item}") value = getattr(dataset, item) value *= biggest_data_len value = value[:biggest_data_len] setattr(dataset, item, value) print(f"New {item} dataset length: {dataset.__len__()}") extended.append(item) def export_to_video(video_frames, output_video_path, fps): video_writer = imageio.get_writer(output_video_path, fps=fps) for img in video_frames: video_writer.append_data(np.array(img)) video_writer.close() def create_output_folders(output_dir, config): now = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S") out_dir = os.path.join(output_dir, f"train_{now}") os.makedirs(out_dir, exist_ok=True) os.makedirs(f"{out_dir}/samples", exist_ok=True) # OmegaConf.save(config, os.path.join(out_dir, 'config.yaml')) return out_dir def load_primary_models(pretrained_model_path): noise_scheduler = DDIMScheduler.from_pretrained(pretrained_model_path, subfolder="scheduler") tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer") text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder") vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae") unet = UNet3DConditionModel.from_pretrained(pretrained_model_path, subfolder="unet") return noise_scheduler, tokenizer, text_encoder, vae, unet def unet_and_text_g_c(unet, text_encoder, unet_enable, text_enable): unet._set_gradient_checkpointing(value=unet_enable) text_encoder._set_gradient_checkpointing(CLIPEncoder, value=text_enable) def freeze_models(models_to_freeze): for model in models_to_freeze: if model is not None: model.requires_grad_(False) def is_attn(name): return ('attn1' or 'attn2' == name.split('.')[-1]) def set_processors(attentions): for attn in attentions: attn.set_processor(AttnProcessor2_0()) def set_torch_2_attn(unet): optim_count = 0 for name, module in unet.named_modules(): if is_attn(name): if isinstance(module, torch.nn.ModuleList): for m in module: if isinstance(m, BasicTransformerBlock): set_processors([m.attn1, m.attn2]) optim_count += 1 if optim_count > 0: print(f"{optim_count} Attention 
layers using Scaled Dot Product Attention.") def handle_memory_attention(enable_xformers_memory_efficient_attention, enable_torch_2_attn, unet): try: is_torch_2 = hasattr(F, 'scaled_dot_product_attention') enable_torch_2 = is_torch_2 and enable_torch_2_attn if enable_xformers_memory_efficient_attention and not enable_torch_2: if is_xformers_available(): unet.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp) else: raise ValueError("xformers is not available. Make sure it is installed correctly") if enable_torch_2: set_torch_2_attn(unet) except: print("Could not enable memory efficient attention for xformers or Torch 2.0.") def param_optim(model, condition, extra_params=None, is_lora=False, negation=None): extra_params = extra_params if len(extra_params.keys()) > 0 else None return { "model": model, "condition": condition, 'extra_params': extra_params, 'is_lora': is_lora, "negation": negation } def create_optim_params(name='param', params=None, lr=5e-6, extra_params=None): params = { "name": name, "params": params, "lr": lr } if extra_params is not None: for k, v in extra_params.items(): params[k] = v return params def negate_params(name, negation): # We have to do this if we are co-training with LoRA. # This ensures that parameter groups aren't duplicated. if negation is None: return False for n in negation: if n in name and 'temp' not in name: return True return False def create_optimizer_params(model_list, lr): optimizer_params = [] for optim in model_list: model, condition, extra_params, is_lora, negation = optim.values() # Check if we are doing LoRA training. if is_lora and condition and isinstance(model, list): params = create_optim_params( params=itertools.chain(*model), extra_params=extra_params ) optimizer_params.append(params) continue if is_lora and condition and not isinstance(model, list): for n, p in model.named_parameters(): if 'lora' in n: params = create_optim_params(n, p, lr, extra_params) optimizer_params.append(params) continue # If this is true, we can train it. if condition: for n, p in model.named_parameters(): should_negate = 'lora' in n and not is_lora if should_negate: continue params = create_optim_params(n, p, lr, extra_params) optimizer_params.append(params) return optimizer_params def get_optimizer(use_8bit_adam): if use_8bit_adam: try: except ImportError: raise ImportError( "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`" ) return bnb.optim.AdamW8bit else: return torch.optim.AdamW def is_mixed_precision(accelerator): weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 return weight_dtype def cast_to_gpu_and_type(model_list, accelerator, weight_dtype): for model in model_list: if model is not None: model.to(accelerator.device, dtype=weight_dtype) def inverse_video(pipe, latents, num_steps): ddim_inv_scheduler = DDIMScheduler.from_config(pipe.scheduler.config) ddim_inv_scheduler.set_timesteps(num_steps) ddim_inv_latent = ddim_inversion( pipe, ddim_inv_scheduler, video_latent=latents.to(pipe.device), num_inv_steps=num_steps, prompt="")[-1] return ddim_inv_latent def handle_cache_latents( should_cache, output_dir, train_dataloader, train_batch_size, vae, unet, pretrained_model_path, noise_prior, cached_latent_dir=None, ): # Cache latents by storing them in VRAM. # Speeds up training and saves memory by not encoding during the train loop. 
if not should_cache: return None vae.to('cuda', dtype=torch.float16) vae.enable_slicing() pipe = TextToVideoSDPipeline.from_pretrained( pretrained_model_path, vae=vae, unet=copy.deepcopy(unet).to('cuda', dtype=torch.float16) ) pipe.text_encoder.to('cuda', dtype=torch.float16) cached_latent_dir = ( os.path.abspath(cached_latent_dir) if cached_latent_dir is not None else None ) if cached_latent_dir is None: cache_save_dir = f"{output_dir}/cached_latents" os.makedirs(cache_save_dir, exist_ok=True) for i, batch in enumerate(tqdm(train_dataloader, desc="Caching Latents.")): save_name = f"cached_{i}" full_out_path = f"{cache_save_dir}/{save_name}.pt" pixel_values = batch['pixel_values'].to('cuda', dtype=torch.float16) batch['latents'] = tensor_to_vae_latent(pixel_values, vae) if noise_prior > 0.: batch['inversion_noise'] = inverse_video(pipe, batch['latents'], 50) for k, v in batch.items(): batch[k] = v[0] torch.save(batch, full_out_path) del pixel_values del batch # We do this to avoid fragmentation from casting latents between devices. torch.cuda.empty_cache() else: cache_save_dir = cached_latent_dir return torch.utils.data.DataLoader(
CachedDataset(cache_dir=cache_save_dir),
5
2023-10-12 12:06:55+00:00
16k
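The row above is a complete example: the truncated module ends with `return torch.utils.data.DataLoader(`, the line to be predicted is `CachedDataset(cache_dir=cache_save_dir),`, and the index 5 recorded for the row points at the CachedDataset snippet in the retrieved context (counting from 0: UNet3DConditionModel, VideoJsonDataset, SingleVideoDataset, ImageDataset, VideoFolderDataset, CachedDataset, ...). The sketch below is a hypothetical illustration, not part of the dataset or of MotionDirector_train.py, of how such a row could be turned into a next-line completion prompt and scored; the names `build_prompt` and `exact_match` are invented here for clarity.

    # Minimal sketch, assuming each row provides a list of retrieved snippets,
    # the index of the snippet that defines the needed symbol, the truncated
    # file contents, and the single target line to be predicted.

    def build_prompt(context_snippets, gold_index, truncated_code):
        """Prepend the chosen snippet as a comment block, then the truncated file."""
        snippet = context_snippets[gold_index]
        commented = "\n".join("# " + line for line in snippet.splitlines())
        return "# Retrieved context:\n" + commented + "\n\n" + truncated_code

    def exact_match(prediction, target):
        """Whitespace-insensitive comparison of a predicted line against the target."""
        return prediction.strip() == target.strip()

    # Values visible in the row above.
    target_line = "CachedDataset(cache_dir=cache_save_dir),"
    print(exact_match("  CachedDataset(cache_dir=cache_save_dir),", target_line))  # True

In this row the connection between the retrieved context and the target is direct: the symbol completing the `DataLoader(` call is exactly the class defined by the snippet at index 5, which is what makes that snippet the supporting one for the completion.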
NVlabs/EmerNeRF
builders.py
[ { "identifier": "SceneDataset", "path": "datasets/base/scene_dataset.py", "snippet": "class SceneDataset(abc.ABC):\n \"\"\"\n Base class for scene dataset.\n \"\"\"\n\n data_cfg: OmegaConf = None\n pixel_source: ScenePixelSource = None\n lidar_source: SceneLidarSource = None\n # training and testing indices are indices into the full dataset\n # train_indices are img indices, so the length is num_cams * num_timesteps\n train_indices: List[int] = None\n test_indices: List[int] = None\n # train_timesteps are timesteps, so the length is num_timesteps (len(unique_timesteps))\n train_timesteps: Tensor = None\n test_timesteps: Tensor = None\n\n # dataset wrappers\n # full: includes all data\n full_pixel_set: SplitWrapper = None\n full_lidar_set: SplitWrapper = None\n # train: includes only training data\n train_pixel_set: SplitWrapper = None\n train_lidar_set: SplitWrapper = None\n # test: includes only testing data\n test_pixel_set: SplitWrapper = None\n test_lidar_set: SplitWrapper = None\n\n def __init__(\n self,\n data_config: OmegaConf,\n ):\n super().__init__()\n self.data_cfg = data_config\n\n @abc.abstractmethod\n def build_data_source(self):\n \"\"\"\n Create the data source for the dataset.\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def build_split_wrapper(self):\n \"\"\"\n Makes each data source as a Pytorch Dataset.\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def split_train_test(self):\n raise NotImplementedError\n\n def get_aabb(self) -> Tensor:\n if self.lidar_source is not None:\n aabb = self.lidar_source.get_aabb()\n else:\n aabb = self.pixel_source.get_aabb()\n return aabb\n\n @property\n def num_cams(self) -> int:\n return self.pixel_source.num_cams\n\n @property\n def scene_idx(self) -> int:\n return self.data_cfg.scene_idx\n\n @property\n def num_img_timesteps(self) -> int:\n return self.pixel_source.num_timesteps\n\n @property\n def num_lidar_timesteps(self) -> int:\n if self.lidar_source is None:\n logger.warning(\"No lidar source, returning num_img_timesteps\")\n return self.num_img_timesteps\n return self.lidar_source.num_timesteps\n\n @property\n def num_train_timesteps(self) -> int:\n return len(self.train_timesteps)\n\n @property\n def num_test_timesteps(self) -> int:\n return len(self.test_timesteps)\n\n @property\n def unique_normalized_training_timestamps(self) -> Tensor:\n return self.pixel_source.unique_normalized_timestamps[self.train_timesteps]\n\n @property\n def device(self):\n return self.data_cfg.preload_device" }, { "identifier": "DensityField", "path": "radiance_fields/radiance_field.py", "snippet": "class DensityField(nn.Module):\n def __init__(\n self,\n xyz_encoder: HashEncoder,\n aabb: Union[Tensor, List[float]] = [[-1.0, -1.0, -1.0, 1.0, 1.0, 1.0]],\n num_dims: int = 3,\n density_activation: Callable = lambda x: trunc_exp(x - 1),\n unbounded: bool = False,\n base_mlp_layer_width: int = 64,\n ) -> None:\n super().__init__()\n if not isinstance(aabb, Tensor):\n aabb = torch.tensor(aabb, dtype=torch.float32)\n self.register_buffer(\"aabb\", aabb)\n self.num_dims = num_dims\n self.density_activation = density_activation\n self.unbounded = unbounded\n self.xyz_encoder = xyz_encoder\n\n # density head\n self.base_mlp = nn.Sequential(\n nn.Linear(self.xyz_encoder.n_output_dims, base_mlp_layer_width),\n nn.ReLU(),\n nn.Linear(base_mlp_layer_width, 1),\n )\n\n @property\n def device(self) -> torch.device:\n return self.aabb.device\n\n def set_aabb(self, aabb: Union[Tensor, List[float]]) -> None:\n if not 
isinstance(aabb, Tensor):\n aabb = torch.tensor(aabb, dtype=torch.float32)\n logger.info(f\"Set propnet aabb from {self.aabb} to {aabb}\")\n self.aabb.copy_(aabb)\n self.aabb = self.aabb.to(self.device)\n\n def forward(\n self, positions: Tensor, data_dict: Dict[str, Tensor] = None\n ) -> Dict[str, Tensor]:\n if self.unbounded:\n # use infinte norm to contract the positions for cuboid aabb\n positions = contract(positions, self.aabb, ord=float(\"inf\"))\n else:\n aabb_min, aabb_max = torch.split(self.aabb, 3, dim=-1)\n positions = (positions - aabb_min) / (aabb_max - aabb_min)\n selector = ((positions > 0.0) & (positions < 1.0)).all(dim=-1).to(positions)\n positions = positions * selector.unsqueeze(-1)\n xyz_encoding = self.xyz_encoder(positions.view(-1, self.num_dims))\n density_before_activation = self.base_mlp(xyz_encoding).view(\n list(positions.shape[:-1]) + [-1]\n )\n density = self.density_activation(density_before_activation)\n return {\"density\": density}" }, { "identifier": "RadianceField", "path": "radiance_fields/radiance_field.py", "snippet": "class RadianceField(nn.Module):\n def __init__(\n self,\n xyz_encoder: HashEncoder,\n dynamic_xyz_encoder: Optional[HashEncoder] = None,\n flow_xyz_encoder: Optional[HashEncoder] = None,\n aabb: Union[Tensor, List[float]] = [-1, -1, -1, 1, 1, 1],\n num_dims: int = 3,\n density_activation: Callable = lambda x: trunc_exp(x - 1),\n unbounded: bool = True,\n geometry_feature_dim: int = 15,\n base_mlp_layer_width: int = 64,\n head_mlp_layer_width: int = 64,\n enable_cam_embedding: bool = False,\n enable_img_embedding: bool = False,\n num_cams: int = 3,\n appearance_embedding_dim: int = 16,\n semantic_feature_dim: int = 64,\n feature_mlp_layer_width: int = 256,\n feature_embedding_dim: int = 768,\n enable_sky_head: bool = False,\n enable_shadow_head: bool = False,\n enable_feature_head: bool = False,\n num_train_timesteps: int = 0,\n interpolate_xyz_encoding: bool = False,\n enable_learnable_pe: bool = True,\n enable_temporal_interpolation: bool = False,\n ) -> None:\n super().__init__()\n # scene properties\n if not isinstance(aabb, Tensor):\n aabb = torch.tensor(aabb, dtype=torch.float32)\n self.register_buffer(\"aabb\", aabb)\n self.unbounded = unbounded\n self.num_cams = num_cams\n self.num_dims = num_dims\n self.density_activation = density_activation\n\n # appearance embedding\n self.enable_cam_embedding = enable_cam_embedding\n self.enable_img_embedding = enable_img_embedding\n self.appearance_embedding_dim = appearance_embedding_dim\n\n self.geometry_feature_dim = geometry_feature_dim\n # add semantic feature dim if feature head is enabled\n if not enable_feature_head:\n semantic_feature_dim = 0\n self.semantic_feature_dim = semantic_feature_dim\n\n # note: we use very conservative default values for mlps\n # usually you want to use larger ones\n\n # ======== Static Field ======== #\n self.xyz_encoder = xyz_encoder\n self.base_mlp = nn.Sequential(\n nn.Linear(self.xyz_encoder.n_output_dims, base_mlp_layer_width),\n nn.ReLU(),\n nn.Linear(\n base_mlp_layer_width, geometry_feature_dim + semantic_feature_dim\n ),\n )\n\n # ======== Dynamic Field ======== #\n self.interpolate_xyz_encoding = interpolate_xyz_encoding\n self.dynamic_xyz_encoder = dynamic_xyz_encoder\n self.enable_temporal_interpolation = enable_temporal_interpolation\n if self.dynamic_xyz_encoder is not None:\n # for temporal interpolation\n self.register_buffer(\"training_timesteps\", torch.zeros(num_train_timesteps))\n self.dynamic_base_mlp = nn.Sequential(\n 
nn.Linear(self.dynamic_xyz_encoder.n_output_dims, base_mlp_layer_width),\n nn.ReLU(),\n nn.Linear(\n base_mlp_layer_width,\n geometry_feature_dim + semantic_feature_dim,\n ),\n )\n\n # ======== Flow Field ======== #\n self.flow_xyz_encoder = flow_xyz_encoder\n if self.flow_xyz_encoder is not None:\n self.flow_mlp = nn.Sequential(\n nn.Linear(\n self.flow_xyz_encoder.n_output_dims,\n base_mlp_layer_width,\n ),\n nn.ReLU(),\n nn.Linear(base_mlp_layer_width, base_mlp_layer_width),\n nn.ReLU(),\n nn.Linear(base_mlp_layer_width, 6), # 3 for forward, 3 for backward\n # no activation function for flow\n )\n\n # appearance embedding\n if self.enable_cam_embedding:\n # per-camera embedding\n self.appearance_embedding = nn.Embedding(num_cams, appearance_embedding_dim)\n elif self.enable_img_embedding:\n # per-image embedding\n self.appearance_embedding = nn.Embedding(\n num_train_timesteps * num_cams, appearance_embedding_dim\n )\n else:\n self.appearance_embedding = None\n\n # direction encoding\n self.direction_encoding = SinusoidalEncoder(\n n_input_dims=3, min_deg=0, max_deg=4\n )\n\n # ======== Color Head ======== #\n self.rgb_head = MLP(\n in_dims=geometry_feature_dim\n + self.direction_encoding.n_output_dims\n + (\n appearance_embedding_dim\n if self.enable_cam_embedding or self.enable_img_embedding\n else 0 # 2 or 0?\n ),\n out_dims=3,\n num_layers=3,\n hidden_dims=head_mlp_layer_width,\n skip_connections=[1],\n )\n\n # ======== Shadow Head ======== #\n self.enable_shadow_head = enable_shadow_head\n if self.enable_shadow_head:\n self.shadow_head = nn.Sequential(\n nn.Linear(geometry_feature_dim, base_mlp_layer_width),\n nn.ReLU(),\n nn.Linear(base_mlp_layer_width, 1),\n nn.Sigmoid(),\n )\n\n # ======== Sky Head ======== #\n self.enable_sky_head = enable_sky_head\n if self.enable_sky_head:\n self.sky_head = MLP(\n in_dims=self.direction_encoding.n_output_dims\n + (\n appearance_embedding_dim\n if self.enable_cam_embedding or self.enable_img_embedding\n else 0\n ),\n out_dims=3,\n num_layers=3,\n hidden_dims=head_mlp_layer_width,\n skip_connections=[1],\n )\n if enable_feature_head:\n # feature sky head\n self.dino_sky_head = nn.Sequential(\n # TODO: remove appearance embedding from dino sky head\n nn.Linear(\n self.direction_encoding.n_output_dims\n + (\n appearance_embedding_dim\n if self.enable_cam_embedding or self.enable_img_embedding\n else 0\n ),\n feature_mlp_layer_width,\n ),\n nn.ReLU(),\n nn.Linear(feature_mlp_layer_width, feature_mlp_layer_width),\n nn.ReLU(),\n nn.Linear(feature_mlp_layer_width, feature_embedding_dim),\n )\n\n # ======== Feature Head ======== #\n self.enable_feature_head = enable_feature_head\n if self.enable_feature_head:\n self.dino_head = nn.Sequential(\n nn.Linear(semantic_feature_dim, feature_mlp_layer_width),\n nn.ReLU(),\n nn.Linear(feature_mlp_layer_width, feature_mlp_layer_width),\n nn.ReLU(),\n nn.Linear(feature_mlp_layer_width, feature_embedding_dim),\n )\n # placeholders for visualization, will be registered when available\n self.register_buffer(\n \"feats_reduction_mat\", torch.zeros(feature_embedding_dim, 3)\n )\n self.register_buffer(\"feat_color_min\", torch.zeros(3, dtype=torch.float32))\n self.register_buffer(\"feat_color_max\", torch.ones(3, dtype=torch.float32))\n\n # positional embedding (PE) decomposition\n self.enable_learnable_pe = enable_learnable_pe\n if self.enable_learnable_pe:\n # globally-shared low-resolution learnable PE map\n self.learnable_pe_map = nn.Parameter(\n 0.05 * torch.randn(1, feature_embedding_dim // 2, 80, 120),\n 
requires_grad=True,\n )\n # a PE head to decode PE features\n self.pe_head = nn.Sequential(\n nn.Linear(feature_embedding_dim // 2, feature_embedding_dim),\n )\n\n def register_normalized_training_timesteps(\n self, normalized_timesteps: Tensor, time_diff: float = None\n ) -> None:\n \"\"\"\n register normalized timesteps for temporal interpolation\n\n Args:\n normalized_timesteps (Tensor): normalized timesteps in [0, 1]\n time_diff (float, optional): time difference between two consecutive timesteps. Defaults to None.\n \"\"\"\n if self.dynamic_xyz_encoder is not None:\n # register timesteps for temporal interpolation\n self.training_timesteps.copy_(normalized_timesteps)\n self.training_timesteps = self.training_timesteps.to(self.device)\n if time_diff is not None:\n # use the provided time difference if available\n self.time_diff = time_diff\n else:\n if len(self.training_timesteps) > 1:\n # otherwise, compute the time difference from the provided timesteps\n # it's important to make sure the provided timesteps are consecutive\n self.time_diff = (\n self.training_timesteps[1] - self.training_timesteps[0]\n )\n else:\n self.time_diff = 0\n\n def set_aabb(self, aabb: Union[Tensor, List[float]]) -> None:\n \"\"\"\n register aabb for scene space\n \"\"\"\n if not isinstance(aabb, Tensor):\n aabb = torch.tensor(aabb, dtype=torch.float32)\n logger.info(f\"Set aabb from {self.aabb} to {aabb}\")\n self.aabb.copy_(aabb)\n self.aabb = self.aabb.to(self.device)\n\n def register_feats_reduction_mat(\n self,\n feats_reduction_mat: Tensor,\n feat_color_min: Tensor,\n feat_color_max: Tensor,\n ) -> None:\n \"\"\"\n A placeholder for registering the PCA reduction matrix and min/max values for visualization.\n You may not want to compute PCA reduction matrix every time from the dataset.\n \"\"\"\n # for visualization\n self.feats_reduction_mat.copy_(feats_reduction_mat)\n self.feat_color_min.copy_(feat_color_min)\n self.feat_color_max.copy_(feat_color_max)\n self.feats_reduction_mat = self.feats_reduction_mat.to(self.device)\n self.feat_color_min = self.feat_color_min.to(self.device)\n self.feat_color_max = self.feat_color_max.to(self.device)\n\n @property\n def device(self) -> torch.device:\n return self.aabb.device\n\n def contract_points(\n self,\n positions: Tensor,\n ) -> Tensor:\n \"\"\"\n contract [-inf, inf] points to the range [0, 1] for hash encoding\n\n Returns:\n normed_positions: [..., 3] in [0, 1]\n \"\"\"\n if self.unbounded:\n # use infinte norm to contract the positions for cuboid aabb\n normed_positions = contract(positions, self.aabb, ord=float(\"inf\"))\n else:\n aabb_min, aabb_max = torch.split(self.aabb, 3, dim=-1)\n normed_positions = (positions - aabb_min) / (aabb_max - aabb_min)\n selector = (\n ((normed_positions > 0.0) & (normed_positions < 1.0))\n .all(dim=-1)\n .to(positions)\n )\n normed_positions = normed_positions * selector.unsqueeze(-1)\n return normed_positions\n\n def forward_static_hash(\n self,\n positions: Tensor,\n ) -> Tensor:\n \"\"\"\n forward pass for static hash encoding\n\n Returns:\n encoded_features: [..., geometry_feature_dim + (semantic_feature_dim)]\n normed_positions: [..., 3] in [0, 1]\n \"\"\"\n normed_positions = self.contract_points(positions)\n xyz_encoding = self.xyz_encoder(normed_positions.view(-1, self.num_dims))\n encoded_features = self.base_mlp(xyz_encoding).view(\n list(normed_positions.shape[:-1]) + [-1]\n )\n return encoded_features, normed_positions\n\n def forward_dynamic_hash(\n self,\n normed_positions: Tensor,\n normed_timestamps: 
Tensor,\n return_hash_encodings: bool = False,\n ) -> Union[Tuple[Tensor, Tensor], Tensor]:\n \"\"\"\n forward pass for dynamic hash encoding\n\n Returns:\n encoded_dynamic_feats: [..., geometry_feature_dim + (semantic_feature_dim)]\n dynamic_xyz_encoding: [..., n_output_dims] (optional)\n \"\"\"\n if normed_timestamps.shape[-1] != 1:\n normed_timestamps = normed_timestamps.unsqueeze(-1)\n # To be fixed.\n # if self.training or not self.enable_temporal_interpolation:\n if True:\n temporal_positions = torch.cat(\n [normed_positions, normed_timestamps], dim=-1\n )\n dynamic_xyz_encoding = self.dynamic_xyz_encoder(\n temporal_positions.view(-1, self.num_dims + 1)\n ).view(list(temporal_positions.shape[:-1]) + [-1])\n encoded_dynamic_feats = self.dynamic_base_mlp(dynamic_xyz_encoding)\n else:\n encoded_dynamic_feats = temporal_interpolation(\n normed_timestamps,\n self.training_timesteps,\n normed_positions,\n self.dynamic_xyz_encoder,\n self.dynamic_base_mlp,\n interpolate_xyz_encoding=self.interpolate_xyz_encoding,\n )\n if return_hash_encodings:\n return encoded_dynamic_feats, dynamic_xyz_encoding\n else:\n return encoded_dynamic_feats\n\n def forward_flow_hash(\n self,\n normed_positions: Tensor,\n normed_timestamps: Tensor,\n ) -> Tuple[Tensor, Tensor]:\n \"\"\"\n forward pass for flow hash encoding\n\n Returns:\n flow: [..., 6] (forward_flow, backward_flow)\n \"\"\"\n if normed_timestamps.shape[-1] != 1:\n normed_timestamps = normed_timestamps.unsqueeze(-1)\n if self.training or not self.enable_temporal_interpolation:\n temporal_positions = torch.cat(\n [normed_positions, normed_timestamps], dim=-1\n )\n flow_xyz_encoding = self.flow_xyz_encoder(\n temporal_positions.view(-1, self.num_dims + 1)\n ).view(list(temporal_positions.shape[:-1]) + [-1])\n flow = self.flow_mlp(flow_xyz_encoding)\n else:\n flow = temporal_interpolation(\n normed_timestamps,\n self.training_timesteps,\n normed_positions,\n self.flow_xyz_encoder,\n self.flow_mlp,\n interpolate_xyz_encoding=True,\n )\n return flow\n\n def forward(\n self,\n positions: Tensor,\n directions: Tensor = None,\n data_dict: Dict[str, Tensor] = {},\n return_density_only: bool = False,\n combine_static_dynamic: bool = False,\n query_feature_head: bool = True,\n query_pe_head: bool = True,\n ) -> Dict[str, Tensor]:\n \"\"\"\n Args:\n positions: [..., 3]\n directions: [..., 3]\n data_dict: a dictionary containing additional data\n return_density_only: if True, only return density without querying other heads\n combine_static_dynamic: if True, combine static and dynamic predictions based on static and dynamic density\n in addition to returning separate results for static and dynamic fields\n query_feature_head: if True, query feature head\n query_pe_head: if True, query PE head. 
Disable this if we want to directly query 3D features.\n Returns:\n results_dict: a dictionary containing everything\n \"\"\"\n results_dict = {}\n # forward static branch\n encoded_features, normed_positions = self.forward_static_hash(positions)\n geo_feats, semantic_feats = torch.split(\n encoded_features,\n [self.geometry_feature_dim, self.semantic_feature_dim],\n dim=-1,\n )\n static_density = self.density_activation(geo_feats[..., 0])\n\n has_timestamps = (\n \"normed_timestamps\" in data_dict or \"lidar_normed_timestamps\" in data_dict\n )\n if self.dynamic_xyz_encoder is not None and has_timestamps:\n # forward dynamic branch\n if \"normed_timestamps\" in data_dict:\n normed_timestamps = data_dict[\"normed_timestamps\"]\n elif \"lidar_normed_timestamps\" in data_dict:\n # we use `lidar_` prefix as an identifier to skip querying other heads\n normed_timestamps = data_dict[\"lidar_normed_timestamps\"]\n dynamic_feats, dynamic_hash_encodings = self.forward_dynamic_hash(\n normed_positions, normed_timestamps, return_hash_encodings=True\n )\n if self.flow_xyz_encoder is not None:\n flow = self.forward_flow_hash(normed_positions, normed_timestamps)\n forward_flow, backward_flow = flow[..., :3], flow[..., 3:]\n results_dict[\"forward_flow\"] = forward_flow\n results_dict[\"backward_flow\"] = backward_flow\n temporal_aggregation_results = self.temporal_aggregation(\n positions,\n normed_timestamps,\n forward_flow,\n backward_flow,\n dynamic_feats,\n )\n # overwrite dynamic feats using temporal aggregation results\n dynamic_feats = temporal_aggregation_results[\"dynamic_feats\"]\n # to be studied\n temporal_aggregation_results[\n \"current_dynamic_hash_encodings\"\n ] = dynamic_hash_encodings\n results_dict.update(temporal_aggregation_results)\n (dynamic_geo_feats, dynamic_semantic_feats,) = torch.split(\n dynamic_feats,\n [self.geometry_feature_dim, self.semantic_feature_dim],\n dim=-1,\n )\n dynamic_density = self.density_activation(dynamic_geo_feats[..., 0])\n # blend static and dynamic density to get the final density\n density = static_density + dynamic_density\n results_dict.update(\n {\n \"density\": density,\n \"static_density\": static_density,\n \"dynamic_density\": dynamic_density,\n }\n )\n if return_density_only:\n # skip querying other heads\n return results_dict\n\n if directions is not None:\n rgb_results = self.query_rgb(\n directions, geo_feats, dynamic_geo_feats, data_dict=data_dict\n )\n results_dict[\"dynamic_rgb\"] = rgb_results[\"dynamic_rgb\"]\n results_dict[\"static_rgb\"] = rgb_results[\"rgb\"]\n if combine_static_dynamic:\n static_ratio = static_density / (density + 1e-6)\n dynamic_ratio = dynamic_density / (density + 1e-6)\n results_dict[\"rgb\"] = (\n static_ratio[..., None] * results_dict[\"static_rgb\"]\n + dynamic_ratio[..., None] * results_dict[\"dynamic_rgb\"]\n )\n if self.enable_shadow_head:\n shadow_ratio = self.shadow_head(dynamic_geo_feats)\n results_dict[\"shadow_ratio\"] = shadow_ratio\n if combine_static_dynamic and \"rgb\" in results_dict:\n results_dict[\"rgb\"] = (\n static_ratio[..., None]\n * results_dict[\"rgb\"]\n * (1 - shadow_ratio)\n + dynamic_ratio[..., None] * results_dict[\"dynamic_rgb\"]\n )\n else:\n # if no dynamic branch, use static density\n results_dict[\"density\"] = static_density\n if return_density_only:\n # skip querying other heads\n return results_dict\n if directions is not None:\n rgb_results = self.query_rgb(directions, geo_feats, data_dict=data_dict)\n results_dict[\"rgb\"] = rgb_results[\"rgb\"]\n\n if 
self.enable_feature_head and query_feature_head:\n if self.enable_learnable_pe and query_pe_head:\n learnable_pe_map = (\n F.grid_sample(\n self.learnable_pe_map,\n # assume pixel coords have been normalize to [-1, 1]\n data_dict[\"pixel_coords\"].reshape(1, 1, -1, 2) * 2 - 1,\n align_corners=False, # didn't test with True\n mode=\"bilinear\", # didn't test with other modes\n )\n .squeeze(2)\n .squeeze(0)\n .permute(1, 0)\n )\n dino_pe = self.pe_head(learnable_pe_map)\n results_dict[\"dino_pe\"] = dino_pe\n dino_feats = self.dino_head(semantic_feats)\n\n if self.dynamic_xyz_encoder is not None and has_timestamps:\n dynamic_dino_feats = self.dino_head(dynamic_semantic_feats)\n results_dict[\"static_dino_feat\"] = dino_feats\n results_dict[\"dynamic_dino_feat\"] = dynamic_dino_feats\n if combine_static_dynamic:\n static_ratio = static_density / (density + 1e-6)\n dynamic_ratio = dynamic_density / (density + 1e-6)\n results_dict[\"dino_feat\"] = (\n static_ratio[..., None] * dino_feats\n + dynamic_ratio[..., None] * dynamic_dino_feats\n )\n else:\n results_dict[\"dino_feat\"] = dino_feats\n\n # query sky if not in lidar mode\n if (\n self.enable_sky_head\n and \"lidar_origin\" not in data_dict\n and directions is not None\n ):\n directions = directions[:, 0]\n reduced_data_dict = {k: v[:, 0] for k, v in data_dict.items()}\n sky_results = self.query_sky(directions, data_dict=reduced_data_dict)\n results_dict.update(sky_results)\n\n return results_dict\n\n def temporal_aggregation(\n self,\n positions: Tensor, # current world coordinates\n normed_timestamps: Tensor, # current normalized timestamps\n forward_flow: Tensor,\n backward_flow: Tensor,\n dynamic_feats: Tensor,\n ) -> Tensor:\n \"\"\"\n temporal aggregation for dynamic features\n Eq. (8) in the emernerf paper\n \"\"\"\n if normed_timestamps.shape[-1] != 1:\n normed_timestamps = normed_timestamps.unsqueeze(-1)\n if self.training:\n noise = torch.rand_like(forward_flow)[..., 0:1]\n else:\n noise = torch.ones_like(forward_flow)[..., 0:1]\n # forward and backward warped positions\n forward_warped_positions = self.contract_points(\n positions + forward_flow * noise\n )\n backward_warped_positions = self.contract_points(\n positions + backward_flow * noise\n )\n # forward and backward warped timestamps\n forward_warped_time = torch.clamp(\n normed_timestamps + self.time_diff * noise, 0, 1.0\n )\n backward_warped_time = torch.clamp(\n normed_timestamps - self.time_diff * noise, 0, 1.0\n )\n (\n forward_dynamic_feats,\n forward_dynamic_hash_encodings,\n ) = self.forward_dynamic_hash(\n forward_warped_positions,\n forward_warped_time,\n return_hash_encodings=True,\n )\n (\n backward_dynamic_feats,\n backward_dynamic_hash_encodings,\n ) = self.forward_dynamic_hash(\n backward_warped_positions,\n backward_warped_time,\n return_hash_encodings=True,\n )\n forward_pred_flow = self.forward_flow_hash(\n forward_warped_positions,\n forward_warped_time,\n )\n backward_pred_flow = self.forward_flow_hash(\n backward_warped_positions,\n backward_warped_time,\n )\n # simple weighted sum\n aggregated_dynamic_feats = (\n dynamic_feats + 0.5 * forward_dynamic_feats + 0.5 * backward_dynamic_feats\n ) / 2.0\n return {\n \"dynamic_feats\": aggregated_dynamic_feats,\n \"forward_pred_backward_flow\": forward_pred_flow[..., 3:],\n \"backward_pred_forward_flow\": backward_pred_flow[..., :3],\n # to be studied\n \"forward_dynamic_hash_encodings\": forward_dynamic_hash_encodings,\n \"backward_dynamic_hash_encodings\": backward_dynamic_hash_encodings,\n }\n\n def 
query_rgb(\n self,\n directions: Tensor,\n geo_feats: Tensor,\n dynamic_geo_feats: Tensor = None,\n data_dict: Dict[str, Tensor] = None,\n ) -> Tensor:\n directions = (directions + 1.0) / 2.0 # do we need this?\n h = self.direction_encoding(directions.reshape(-1, directions.shape[-1])).view(\n *directions.shape[:-1], -1\n )\n if self.enable_cam_embedding or self.enable_img_embedding:\n if \"cam_idx\" in data_dict and self.enable_cam_embedding:\n appearance_embedding = self.appearance_embedding(data_dict[\"cam_idx\"])\n elif \"img_idx\" in data_dict and self.enable_img_embedding:\n appearance_embedding = self.appearance_embedding(data_dict[\"img_idx\"])\n else:\n # use mean appearance embedding\n # print(\"using mean appearance embedding\")\n appearance_embedding = torch.ones(\n (*directions.shape[:-1], self.appearance_embedding_dim),\n device=directions.device,\n ) * self.appearance_embedding.weight.mean(dim=0)\n h = torch.cat([h, appearance_embedding], dim=-1)\n\n rgb = self.rgb_head(torch.cat([h, geo_feats], dim=-1))\n rgb = F.sigmoid(rgb)\n results = {\"rgb\": rgb}\n\n if self.dynamic_xyz_encoder is not None:\n assert (\n dynamic_geo_feats is not None\n ), \"Dynamic geometry features are not provided.\"\n dynamic_rgb = self.rgb_head(torch.cat([h, dynamic_geo_feats], dim=-1))\n dynamic_rgb = F.sigmoid(dynamic_rgb)\n results[\"dynamic_rgb\"] = dynamic_rgb\n return results\n\n def query_sky(\n self, directions: Tensor, data_dict: Dict[str, Tensor] = None\n ) -> Dict[str, Tensor]:\n if len(directions.shape) == 2:\n dd = self.direction_encoding(directions).to(directions)\n else:\n dd = self.direction_encoding(directions[:, 0]).to(directions)\n if self.enable_cam_embedding or self.enable_img_embedding:\n # optionally add appearance embedding\n if \"cam_idx\" in data_dict and self.enable_cam_embedding:\n appearance_embedding = self.appearance_embedding(data_dict[\"cam_idx\"])\n elif \"img_idx\" in data_dict and self.enable_img_embedding:\n appearance_embedding = self.appearance_embedding(data_dict[\"img_idx\"])\n else:\n # use mean appearance embedding\n appearance_embedding = torch.ones(\n (*directions.shape[:-1], self.appearance_embedding_dim),\n device=directions.device,\n ) * self.appearance_embedding.weight.mean(dim=0)\n dd = torch.cat([dd, appearance_embedding], dim=-1)\n rgb_sky = self.sky_head(dd).to(directions)\n rgb_sky = F.sigmoid(rgb_sky)\n results = {\"rgb_sky\": rgb_sky}\n if self.enable_feature_head:\n self.dino_sky_head(dd).to(directions)\n results[\"dino_sky_feat\"] = self.dino_sky_head(dd).to(directions)\n return results\n\n def query_flow(\n self, positions: Tensor, normed_timestamps: Tensor, query_density: bool = True\n ) -> Dict[str, Tensor]:\n \"\"\"\n query flow field\n \"\"\"\n normed_positions = self.contract_points(positions)\n flow = self.forward_flow_hash(normed_positions, normed_timestamps)\n results = {\n \"forward_flow\": flow[..., :3],\n \"backward_flow\": flow[..., 3:],\n }\n if query_density:\n # it's important to filter valid flows based on a dynamic density threshold.\n # flows are valid only if they are on dynamic points.\n dynamic_feats = self.forward_dynamic_hash(\n normed_positions, normed_timestamps\n )\n (dynamic_geo_feats, _,) = torch.split(\n dynamic_feats,\n [self.geometry_feature_dim, self.semantic_feature_dim],\n dim=-1,\n )\n dynamic_density = self.density_activation(dynamic_geo_feats[..., 0])\n results[\"dynamic_density\"] = dynamic_density\n return results\n\n def query_attributes(\n self,\n positions: Tensor,\n normed_timestamps: Tensor = 
None,\n query_feature_head: bool = True,\n ):\n \"\"\"\n query attributes (density, dino features, etc.)\n \"\"\"\n results_dict = {}\n encoded_features, normed_positions = self.forward_static_hash(positions)\n geo_feats, semantic_feats = torch.split(\n encoded_features,\n [self.geometry_feature_dim, self.semantic_feature_dim],\n dim=-1,\n )\n static_density = self.density_activation(geo_feats[..., 0])\n if self.dynamic_xyz_encoder is not None and normed_timestamps is not None:\n dynamic_feats, dynamic_hash_encodings = self.forward_dynamic_hash(\n normed_positions, normed_timestamps, return_hash_encodings=True\n )\n if self.flow_xyz_encoder is not None:\n flow = self.forward_flow_hash(normed_positions, normed_timestamps)\n forward_flow = flow[..., :3]\n backward_flow = flow[..., 3:]\n results_dict[\"forward_flow\"] = forward_flow\n results_dict[\"backward_flow\"] = backward_flow\n temporal_aggregation_results = self.temporal_aggregation(\n positions,\n normed_timestamps,\n forward_flow,\n backward_flow,\n dynamic_feats,\n )\n dynamic_feats = temporal_aggregation_results[\"dynamic_feats\"]\n temporal_aggregation_results[\n \"current_dynamic_hash_encodings\"\n ] = dynamic_hash_encodings\n results_dict.update(temporal_aggregation_results)\n\n (dynamic_geo_feats, dynamic_semantic_feats,) = torch.split(\n dynamic_feats,\n [self.geometry_feature_dim, self.semantic_feature_dim],\n dim=-1,\n )\n dynamic_density = self.density_activation(dynamic_geo_feats[..., 0])\n density = static_density + dynamic_density\n results_dict.update(\n {\n \"density\": density,\n \"static_density\": static_density,\n \"dynamic_density\": dynamic_density,\n # \"occupancy\": occupancy,\n }\n )\n else:\n results_dict[\"density\"] = static_density\n if self.enable_feature_head and query_feature_head:\n # query on demand\n dino_feats = self.dino_head(semantic_feats)\n if self.dynamic_xyz_encoder is not None and normed_timestamps is not None:\n dynamic_dino_feats = self.dino_head(dynamic_semantic_feats)\n results_dict[\"static_dino_feat\"] = dino_feats\n results_dict[\"dynamic_dino_feat\"] = dynamic_dino_feats\n results_dict[\"dino_feat\"] = (\n static_density.unsqueeze(-1) * dino_feats\n + dynamic_density.unsqueeze(-1) * dynamic_dino_feats\n ) / (density.unsqueeze(-1) + 1e-6)\n else:\n results_dict[\"dino_feat\"] = dino_feats\n return results_dict" }, { "identifier": "build_density_field", "path": "radiance_fields/radiance_field.py", "snippet": "def build_density_field(\n aabb: Union[Tensor, List[float]] = [[-1.0, -1.0, -1.0, 1.0, 1.0, 1.0]],\n type: Literal[\"HashEncoder\"] = \"HashEncoder\",\n n_input_dims: int = 3,\n n_levels: int = 5,\n base_resolution: int = 16,\n max_resolution: int = 128,\n log2_hashmap_size: int = 20,\n n_features_per_level: int = 2,\n unbounded: bool = True,\n) -> DensityField:\n if type == \"HashEncoder\":\n xyz_encoder = HashEncoder(\n n_input_dims=n_input_dims,\n n_levels=n_levels,\n base_resolution=base_resolution,\n max_resolution=max_resolution,\n log2_hashmap_size=log2_hashmap_size,\n n_features_per_level=n_features_per_level,\n )\n else:\n raise NotImplementedError(f\"Unknown (xyz_encoder) type: {type}\")\n return DensityField(\n xyz_encoder=xyz_encoder,\n aabb=aabb,\n unbounded=unbounded,\n )" }, { "identifier": "build_radiance_field_from_cfg", "path": "radiance_fields/radiance_field.py", "snippet": "def build_radiance_field_from_cfg(cfg, verbose=True) -> RadianceField:\n xyz_encoder = build_xyz_encoder_from_cfg(cfg.xyz_encoder, verbose=verbose)\n dynamic_xyz_encoder = None\n 
flow_xyz_encoder = None\n if cfg.head.enable_dynamic_branch:\n dynamic_xyz_encoder = build_xyz_encoder_from_cfg(\n cfg.dynamic_xyz_encoder, verbose=verbose\n )\n if cfg.head.enable_flow_branch:\n flow_xyz_encoder = HashEncoder(\n n_input_dims=4,\n n_levels=10,\n base_resolution=16,\n max_resolution=4096,\n log2_hashmap_size=18,\n n_features_per_level=4,\n )\n return RadianceField(\n xyz_encoder=xyz_encoder,\n dynamic_xyz_encoder=dynamic_xyz_encoder,\n flow_xyz_encoder=flow_xyz_encoder,\n unbounded=cfg.unbounded,\n num_cams=cfg.num_cams,\n geometry_feature_dim=cfg.neck.geometry_feature_dim,\n base_mlp_layer_width=cfg.neck.base_mlp_layer_width,\n head_mlp_layer_width=cfg.head.head_mlp_layer_width,\n enable_cam_embedding=cfg.head.enable_cam_embedding,\n enable_img_embedding=cfg.head.enable_img_embedding,\n appearance_embedding_dim=cfg.head.appearance_embedding_dim,\n enable_sky_head=cfg.head.enable_sky_head,\n enable_feature_head=cfg.head.enable_feature_head,\n semantic_feature_dim=cfg.neck.semantic_feature_dim,\n feature_mlp_layer_width=cfg.head.feature_mlp_layer_width,\n feature_embedding_dim=cfg.head.feature_embedding_dim,\n enable_shadow_head=cfg.head.enable_shadow_head,\n num_train_timesteps=cfg.num_train_timesteps, # placeholder\n interpolate_xyz_encoding=cfg.head.interpolate_xyz_encoding,\n enable_learnable_pe=cfg.head.enable_learnable_pe,\n enable_temporal_interpolation=cfg.head.enable_temporal_interpolation,\n )" }, { "identifier": "PropNetEstimator", "path": "third_party/nerfacc_prop_net.py", "snippet": "class PropNetEstimator(AbstractEstimator):\n \"\"\"Proposal network transmittance estimator.\n\n References: \"Mip-NeRF 360: Unbounded Anti-Aliased Neural Radiance Fields.\"\n\n Args:\n optimizer: The optimizer to use for the proposal networks.\n scheduler: The learning rate scheduler to use for the proposal networks.\n \"\"\"\n\n def __init__(\n self,\n optimizer: Optional[torch.optim.Optimizer] = None,\n scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,\n enable_anti_aliasing_loss: Optional[bool] = True,\n anti_aliasing_pulse_width: Optional[List[float]] = [0.03, 0.003],\n ) -> None:\n super().__init__()\n self.optimizer = optimizer\n self.scheduler = scheduler\n self.prop_cache: List = []\n self.enable_anti_aliasing_loss = enable_anti_aliasing_loss\n self.pulse_width = anti_aliasing_pulse_width\n if self.enable_anti_aliasing_loss:\n logger.info(\"Enable anti-aliasing loss, pulse width: %s\", self.pulse_width)\n\n @torch.no_grad()\n def sampling(\n self,\n prop_sigma_fns: List[Callable],\n prop_samples: List[int],\n num_samples: int,\n # rendering options\n n_rays: int,\n near_plane: float,\n far_plane: float,\n sampling_type: Literal[\n \"uniform\", \"lindisp\", \"sqrt\", \"log\", \"uniform_lindisp\"\n ] = \"uniform_lindisp\",\n # training options\n stratified: bool = False,\n requires_grad: bool = False,\n ) -> Tuple[Tensor, Tensor]:\n \"\"\"Sampling with CDFs from proposal networks.\n\n Note:\n When `requires_grad` is `True`, the gradients are allowed to flow\n through the proposal networks, and the outputs of the proposal\n networks are cached to update them later when calling `update_every_n_steps()`\n\n Args:\n prop_sigma_fns: Proposal network evaluate functions. It should be a list\n of functions that take in samples {t_starts (n_rays, n_samples),\n t_ends (n_rays, n_samples)} and returns the post-activation densities\n (n_rays, n_samples).\n prop_samples: Number of samples to draw from each proposal network. 
Should\n be the same length as `prop_sigma_fns`.\n num_samples: Number of samples to draw in the end.\n n_rays: Number of rays.\n near_plane: Near plane.\n far_plane: Far plane.\n sampling_type: Sampling type. Either \"uniform\" or \"lindisp\". Default to\n \"lindisp\".\n stratified: Whether to use stratified sampling. Default to `False`.\n requires_grad: Whether to allow gradients to flow through the proposal\n networks. Default to `False`.\n\n Returns:\n A tuple of {Tensor, Tensor}:\n\n - **t_starts**: The starts of the samples. Shape (n_rays, num_samples).\n - **t_ends**: The ends of the samples. Shape (n_rays, num_samples).\n\n \"\"\"\n assert len(prop_sigma_fns) == len(prop_samples), (\n \"The number of proposal networks and the number of samples \"\n \"should be the same.\"\n )\n cdfs = torch.cat(\n [\n torch.zeros((n_rays, 1), device=self.device),\n torch.ones((n_rays, 1), device=self.device),\n ],\n dim=-1,\n )\n intervals = RayIntervals(vals=cdfs)\n\n for i, (level_fn, level_samples) in enumerate(\n zip(prop_sigma_fns, prop_samples)\n ):\n intervals, _ = importance_sampling(\n intervals, cdfs, level_samples, stratified\n )\n t_vals = _transform_stot(\n sampling_type, intervals.vals, near_plane, far_plane\n )\n t_starts = t_vals[..., :-1]\n t_ends = t_vals[..., 1:]\n\n with torch.set_grad_enabled(requires_grad):\n sigmas = level_fn(t_starts, t_ends)[\"density\"].squeeze(-1)\n assert sigmas.shape == t_starts.shape\n trans, _ = render_transmittance_from_density(t_starts, t_ends, sigmas)\n cdfs = 1.0 - torch.cat(\n [trans, torch.zeros_like(trans[..., :1])], dim=-1\n )\n if requires_grad:\n self.prop_cache.append((intervals, cdfs, i))\n\n intervals, _ = importance_sampling(intervals, cdfs, num_samples, stratified)\n t_vals = _transform_stot(sampling_type, intervals.vals, near_plane, far_plane)\n t_starts = t_vals[..., :-1]\n t_ends = t_vals[..., 1:]\n if requires_grad:\n self.prop_cache.append((intervals, None, None))\n\n return t_starts, t_ends\n\n @torch.enable_grad()\n def compute_loss(self, trans: Tensor, loss_scaler: float = 1.0) -> Tensor:\n \"\"\"Compute the loss for the proposal networks.\n\n Args:\n trans: The transmittance of all samples. Shape (n_rays, num_samples).\n loss_scaler: The loss scaler. 
Default to 1.0.\n\n Returns:\n The loss for the proposal networks.\n \"\"\"\n if len(self.prop_cache) == 0:\n return torch.zeros((), device=self.device)\n\n intervals, _, _ = self.prop_cache.pop()\n # get cdfs at all edges of intervals\n cdfs = 1.0 - torch.cat([trans, torch.zeros_like(trans[..., :1])], dim=-1)\n cdfs = cdfs.detach()\n loss = 0.0\n\n if self.enable_anti_aliasing_loss:\n w_normalize = (cdfs[..., 1:] - cdfs[..., :-1]) / (\n intervals.vals[..., 1:] - intervals.vals[..., :-1]\n )\n c1, w1 = blur_stepfun(intervals.vals, w_normalize, self.pulse_width[0])\n c2, w2 = blur_stepfun(intervals.vals, w_normalize, self.pulse_width[1])\n area1 = 0.5 * (w1[..., 1:] + w1[..., :-1]) * (c1[..., 1:] - c1[..., :-1])\n area2 = 0.5 * (w2[..., 1:] + w2[..., :-1]) * (c2[..., 1:] - c2[..., :-1])\n cdfs1 = torch.cat(\n [\n torch.zeros_like(area1[..., :1]),\n torch.cumsum(area1, dim=-1),\n ],\n dim=-1,\n )\n cdfs2 = torch.cat(\n [\n torch.zeros_like(area2[..., :1]),\n torch.cumsum(area2, dim=-1),\n ],\n dim=-1,\n )\n cs = [c1, c2]\n ws = [w1, w2]\n _cdfs = [cdfs1, cdfs2]\n while self.prop_cache:\n prop_intervals, prop_cdfs, prop_id = self.prop_cache.pop()\n wp = prop_cdfs[..., 1:] - prop_cdfs[..., :-1]\n cdf_interp = sorted_interp_quad(\n prop_intervals.vals, cs[prop_id], ws[prop_id], _cdfs[prop_id]\n )\n w_s = torch.diff(cdf_interp, dim=-1)\n loss += ((w_s - wp).clamp_min(0) ** 2 / (wp + 1e-5)).mean()\n else:\n while self.prop_cache:\n prop_intervals, prop_cdfs, _ = self.prop_cache.pop()\n loss += _pdf_loss(intervals, cdfs, prop_intervals, prop_cdfs).mean()\n return loss * loss_scaler\n\n @torch.enable_grad()\n def update_every_n_steps(\n self,\n trans: Tensor,\n requires_grad: bool = False,\n loss_scaler: float = 1.0,\n ) -> float:\n \"\"\"Update the estimator every n steps during training.\n\n Args:\n trans: The transmittance of all samples. Shape (n_rays, num_samples).\n requires_grad: Whether to allow gradients to flow through the proposal\n networks. Default to `False`.\n loss_scaler: The loss scaler to use. Default to 1.0.\n\n Returns:\n The loss of the proposal networks for logging (a float scalar).\n \"\"\"\n if requires_grad:\n return self._update(trans=trans, loss_scaler=loss_scaler)\n else:\n if self.scheduler is not None:\n self.scheduler.step()\n return 0.0\n\n @torch.enable_grad()\n def _update(self, trans: Tensor, loss_scaler: float = 1.0) -> float:\n assert len(self.prop_cache) > 0\n assert self.optimizer is not None, \"No optimizer is provided.\"\n\n loss = self.compute_loss(trans, loss_scaler)\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n if self.scheduler is not None:\n self.scheduler.step()\n return loss.item()" } ]
import itertools
import logging
import torch
from typing import List, Tuple
from omegaconf import OmegaConf
from datasets.base import SceneDataset
from radiance_fields import (
    DensityField,
    RadianceField,
    build_density_field,
    build_radiance_field_from_cfg,
)
from third_party.nerfacc_prop_net import PropNetEstimator
11,630
logger = logging.getLogger() def build_model_from_cfg( cfg: OmegaConf,
logger = logging.getLogger() def build_model_from_cfg( cfg: OmegaConf,
dataset: SceneDataset,
0
2023-10-11 20:56:27+00:00
16k
alibaba-damo-academy/FunCodec
funcodec/train/gan_trainer.py
[ { "identifier": "AbsBatchStepScheduler", "path": "funcodec/schedulers/abs_scheduler.py", "snippet": "class AbsBatchStepScheduler(AbsScheduler):\n @abstractmethod\n def step(self, epoch: int = None):\n pass\n\n @abstractmethod\n def state_dict(self):\n pass\n\n @abstractmethod\n def load_state_dict(self, state):\n pass" }, { "identifier": "AbsScheduler", "path": "funcodec/schedulers/abs_scheduler.py", "snippet": "class AbsScheduler(ABC):\n @abstractmethod\n def step(self, epoch: int = None):\n pass\n\n @abstractmethod\n def state_dict(self):\n pass\n\n @abstractmethod\n def load_state_dict(self, state):\n pass" }, { "identifier": "to_device", "path": "funcodec/torch_utils/device_funcs.py", "snippet": "def to_device(data, device=None, dtype=None, non_blocking=False, copy=False):\n \"\"\"Change the device of object recursively\"\"\"\n if isinstance(data, dict):\n return {\n k: to_device(v, device, dtype, non_blocking, copy) for k, v in data.items()\n }\n elif dataclasses.is_dataclass(data) and not isinstance(data, type):\n return type(data)(\n *[\n to_device(v, device, dtype, non_blocking, copy)\n for v in dataclasses.astuple(data)\n ]\n )\n # maybe namedtuple. I don't know the correct way to judge namedtuple.\n elif isinstance(data, tuple) and type(data) is not tuple:\n return type(data)(\n *[to_device(o, device, dtype, non_blocking, copy) for o in data]\n )\n elif isinstance(data, (list, tuple)):\n return type(data)(to_device(v, device, dtype, non_blocking, copy) for v in data)\n elif isinstance(data, np.ndarray):\n return to_device(torch.from_numpy(data), device, dtype, non_blocking, copy)\n elif isinstance(data, torch.Tensor):\n return data.to(device, dtype, non_blocking, copy)\n else:\n return data" }, { "identifier": "recursive_average", "path": "funcodec/torch_utils/recursive_op.py", "snippet": "def recursive_average(obj, weight: torch.Tensor, distributed: bool = False):\n obj = recursive_sum(obj, weight, distributed)\n weight = weight.sum()\n if distributed:\n torch.distributed.all_reduce(weight, op=ReduceOp.SUM)\n # Normalize weight to be sum-to-1\n obj = recursive_divide(obj, weight)\n return obj, weight" }, { "identifier": "DistributedOption", "path": "funcodec/train/distributed_utils.py", "snippet": "class DistributedOption:\n # Enable distributed Training\n distributed: bool = False\n # torch.distributed.Backend: \"nccl\", \"mpi\", \"gloo\", or \"tcp\"\n dist_backend: str = \"nccl\"\n # if init_method=\"env://\",\n # env values of \"MASTER_PORT\", \"MASTER_ADDR\", \"WORLD_SIZE\", and \"RANK\" are referred.\n dist_init_method: str = \"env://\"\n dist_world_size: Optional[int] = None\n dist_rank: Optional[int] = None\n local_rank: Optional[int] = None\n ngpu: int = 0\n dist_master_addr: Optional[str] = None\n dist_master_port: Optional[int] = None\n dist_launcher: Optional[str] = None\n multiprocessing_distributed: bool = True\n\n def init_options(self):\n if self.distributed:\n if self.dist_init_method == \"env://\":\n if get_master_addr(self.dist_master_addr, self.dist_launcher) is None:\n raise RuntimeError(\n \"--dist_master_addr or MASTER_ADDR must be set \"\n \"if --dist_init_method == 'env://'\"\n )\n if get_master_port(self.dist_master_port) is None:\n raise RuntimeError(\n \"--dist_master_port or MASTER_PORT must be set \"\n \"if --dist_init_port == 'env://'\"\n )\n\n def init_torch_distributed(self, args):\n if self.distributed:\n # See:\n # https://docs.nvidia.com/deeplearning/sdk/nccl-developer-guide/docs/env.html\n os.environ.setdefault(\"NCCL_DEBUG\", \"INFO\")\n\n 
# See:\n # https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group\n os.environ.setdefault(\"NCCL_BLOCKING_WAIT\", \"1\")\n\n if args.dist_rank is not None and args.dist_world_size is not None:\n torch.distributed.init_process_group(backend='nccl',\n init_method=self.dist_init_method,\n world_size=args.dist_world_size,\n rank=args.dist_rank)\n else:\n torch.distributed.init_process_group(backend='nccl', init_method='env://')\n self.dist_rank = torch.distributed.get_rank()\n self.dist_world_size = torch.distributed.get_world_size()\n self.local_rank = args.local_rank\n\n def init_options_pai(self):\n if self.distributed:\n if self.dist_init_method == \"env://\":\n if get_master_addr(self.dist_master_addr, self.dist_launcher) is None:\n raise RuntimeError(\n \"--dist_master_addr or MASTER_ADDR must be set \"\n \"if --dist_init_method == 'env://'\"\n )\n if get_master_port(self.dist_master_port) is None:\n raise RuntimeError(\n \"--dist_master_port or MASTER_PORT must be set \"\n \"if --dist_init_port == 'env://'\"\n )\n\n self.dist_rank = get_rank(self.dist_rank, self.dist_launcher)\n self.dist_world_size = get_world_size(\n self.dist_world_size, self.dist_launcher\n )\n self.local_rank = get_local_rank(self.local_rank, self.dist_launcher)\n\n if (\n self.dist_rank is not None\n and self.dist_world_size is not None\n and self.dist_rank >= self.dist_world_size\n ):\n raise RuntimeError(\n f\"RANK >= WORLD_SIZE: {self.dist_rank} >= {self.dist_world_size}\"\n )\n\n if self.dist_init_method == \"env://\":\n self.dist_master_addr = get_master_addr(\n self.dist_master_addr, self.dist_launcher\n )\n self.dist_master_port = get_master_port(self.dist_master_port)\n if (\n self.dist_master_addr is not None\n and self.dist_master_port is not None\n ):\n self.dist_init_method = (\n f\"tcp://{self.dist_master_addr}:{self.dist_master_port}\"\n )\n\n def init_torch_distributed_pai(self, args):\n if self.distributed:\n # See:\n # https://docs.nvidia.com/deeplearning/sdk/nccl-developer-guide/docs/env.html\n os.environ.setdefault(\"NCCL_DEBUG\", \"INFO\")\n\n # See:\n # https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group\n os.environ.setdefault(\"NCCL_BLOCKING_WAIT\", \"1\")\n\n torch.distributed.init_process_group(backend='nccl', init_method='env://')\n torch.distributed.barrier()\n self.dist_rank = torch.distributed.get_rank()\n self.dist_world_size = torch.distributed.get_world_size()\n self.local_rank = args.local_rank" }, { "identifier": "SubReporter", "path": "funcodec/train/reporter.py", "snippet": "class SubReporter:\n \"\"\"This class is used in Reporter.\n\n See the docstring of Reporter for the usage.\n \"\"\"\n\n def __init__(self, key: str, epoch: int, total_count: int):\n assert check_argument_types()\n self.key = key\n self.epoch = epoch\n self.start_time = time.perf_counter()\n self.stats = defaultdict(list)\n self._finished = False\n self.total_count = total_count\n self.count = 0\n self._seen_keys_in_the_step = set()\n\n def get_total_count(self) -> int:\n \"\"\"Returns the number of iterations over all epochs.\"\"\"\n return self.total_count\n\n def get_epoch(self) -> int:\n return self.epoch\n\n def next(self):\n \"\"\"Close up this step and reset state for the next step\"\"\"\n for key, stats_list in self.stats.items():\n if key not in self._seen_keys_in_the_step:\n # Fill nan value if the key is not registered in this step\n if isinstance(stats_list[0], WeightedAverage):\n stats_list.append(to_reported_value(np.nan, 0))\n 
elif isinstance(stats_list[0], Average):\n stats_list.append(to_reported_value(np.nan))\n else:\n raise NotImplementedError(f\"type={type(stats_list[0])}\")\n\n assert len(stats_list) == self.count, (len(stats_list), self.count)\n\n self._seen_keys_in_the_step = set()\n\n def register(\n self,\n stats: Dict[str, Optional[Union[Num, Dict[str, Num]]]],\n weight: Num = None,\n ) -> None:\n assert check_argument_types()\n if self._finished:\n raise RuntimeError(\"Already finished\")\n if len(self._seen_keys_in_the_step) == 0:\n # Increment count as the first register in this step\n self.total_count += 1\n self.count += 1\n\n for key2, v in stats.items():\n if key2 in _reserved:\n raise RuntimeError(f\"{key2} is reserved.\")\n if key2 in self._seen_keys_in_the_step:\n raise RuntimeError(f\"{key2} is registered twice.\")\n if v is None:\n v = np.nan\n r = to_reported_value(v, weight)\n\n if key2 not in self.stats:\n # If it's the first time to register the key,\n # append nan values in front of the the value\n # to make it same length to the other stats\n # e.g.\n # stat A: [0.4, 0.3, 0.5]\n # stat B: [nan, nan, 0.2]\n nan = to_reported_value(np.nan, None if weight is None else 0)\n self.stats[key2].extend(\n r if i == self.count - 1 else nan for i in range(self.count)\n )\n else:\n self.stats[key2].append(r)\n self._seen_keys_in_the_step.add(key2)\n\n def log_message(self, start: int = None, end: int = None, num_updates: int = None) -> str:\n if self._finished:\n raise RuntimeError(\"Already finished\")\n if start is None:\n start = 0\n if start < 0:\n start = self.count + start\n if end is None:\n end = self.count\n\n if self.count == 0 or start == end:\n return \"\"\n\n message = f\"{self.epoch}epoch:{self.key}:\" f\"{start + 1}-{end}batch:\"\n if num_updates is not None:\n message += f\"{num_updates}num_updates: \"\n\n for idx, (key2, stats_list) in enumerate(self.stats.items()):\n assert len(stats_list) == self.count, (len(stats_list), self.count)\n # values: List[ReportValue]\n values = stats_list[start:end]\n if idx != 0 and idx != len(stats_list):\n message += \", \"\n\n v = aggregate(values)\n if abs(v) > 1.0e3:\n message += f\"{key2}={v:.3e}\"\n elif abs(v) > 1.0e-3:\n message += f\"{key2}={v:.3f}\"\n else:\n message += f\"{key2}={v:.3e}\"\n return message\n\n def tensorboard_add_scalar(self, summary_writer, start: int = None):\n if start is None:\n start = 0\n if start < 0:\n start = self.count + start\n\n for key2, stats_list in self.stats.items():\n assert len(stats_list) == self.count, (len(stats_list), self.count)\n # values: List[ReportValue]\n values = stats_list[start:]\n v = aggregate(values)\n summary_writer.add_scalar(f\"{key2}\", v, self.total_count)\n\n def wandb_log(self, start: int = None):\n import wandb\n\n if start is None:\n start = 0\n if start < 0:\n start = self.count + start\n\n d = {}\n for key2, stats_list in self.stats.items():\n assert len(stats_list) == self.count, (len(stats_list), self.count)\n # values: List[ReportValue]\n values = stats_list[start:]\n v = aggregate(values)\n d[wandb_get_prefix(key2) + key2] = v\n d[\"iteration\"] = self.total_count\n wandb.log(d)\n\n def finished(self) -> None:\n self._finished = True\n\n @contextmanager\n def measure_time(self, name: str):\n start = time.perf_counter()\n yield start\n t = time.perf_counter() - start\n self.register({name: t})\n\n def measure_iter_time(self, iterable, name: str):\n iterator = iter(iterable)\n while True:\n try:\n start = time.perf_counter()\n retval = next(iterator)\n t = 
time.perf_counter() - start\n self.register({name: t})\n yield retval\n except StopIteration:\n break" }, { "identifier": "Trainer", "path": "funcodec/train/trainer.py", "snippet": "class Trainer:\n \"\"\"Trainer having an optimizer.\n\n If you'd like to use multiple optimizers, then inherit this class\n and override the methods if necessary - at least \"train_one_epoch()\"\n\n >>> class TwoOptimizerTrainer(Trainer):\n ... @classmethod\n ... def add_arguments(cls, parser):\n ... ...\n ...\n ... @classmethod\n ... def train_one_epoch(cls, model, optimizers, ...):\n ... loss1 = model.model1(...)\n ... loss1.backward()\n ... optimizers[0].step()\n ...\n ... loss2 = model.model2(...)\n ... loss2.backward()\n ... optimizers[1].step()\n\n \"\"\"\n\n def __init__(self):\n raise RuntimeError(\"This class can't be instantiated.\")\n\n @classmethod\n def build_options(cls, args: argparse.Namespace) -> TrainerOptions:\n \"\"\"Build options consumed by train(), eval()\"\"\"\n assert check_argument_types()\n return build_dataclass(TrainerOptions, args)\n\n @classmethod\n def add_arguments(cls, parser: argparse.ArgumentParser):\n \"\"\"Reserved for future development of another Trainer\"\"\"\n pass\n\n @staticmethod\n def resume(\n checkpoint: Union[str, Path],\n model: torch.nn.Module,\n reporter: Reporter,\n optimizers: Sequence[torch.optim.Optimizer],\n schedulers: Sequence[Optional[AbsScheduler]],\n scaler: Optional[GradScaler],\n ngpu: int = 0,\n oss_bucket=None,\n ):\n logging.info(f\"Try to resume from {checkpoint}.\")\n if oss_bucket is None:\n if os.path.exists(checkpoint):\n states = torch.load(\n checkpoint,\n map_location=f\"cuda:{torch.cuda.current_device()}\" if ngpu > 0 else \"cpu\",\n )\n else:\n return 0\n else:\n if oss_bucket.object_exists(checkpoint):\n buffer = BytesIO(oss_bucket.get_object(checkpoint).read())\n states = torch.load(buffer, map_location=f\"cuda:{torch.cuda.current_device()}\" if ngpu > 0 else \"cpu\",)\n else:\n return 0\n from funcodec.torch_utils.load_pretrained_model import filter_state_dict\n dst_state = model.state_dict()\n src_state = states[\"model\"]\n src_state = filter_state_dict(dst_state, src_state)\n # logging.info(\"Resumed src_state keys: {}\".format(src_state.keys()))\n dst_state.update(src_state)\n model.load_state_dict(dst_state)\n # model.load_state_dict(states[\"model\"])\n reporter.load_state_dict(states[\"reporter\"])\n for optimizer, state in zip(optimizers, states[\"optimizers\"]):\n optimizer.load_state_dict(state)\n for scheduler, state in zip(schedulers, states[\"schedulers\"]):\n if scheduler is not None:\n scheduler.load_state_dict(state)\n if scaler is not None:\n if states[\"scaler\"] is None:\n logging.warning(\"scaler state is not found\")\n else:\n scaler.load_state_dict(states[\"scaler\"])\n\n logging.info(f\"The training was resumed using {checkpoint}\")\n\n @classmethod\n def run(\n cls,\n model: AbsESPnetModel,\n optimizers: Sequence[torch.optim.Optimizer],\n schedulers: Sequence[Optional[AbsScheduler]],\n train_iter_factory: AbsIterFactory,\n valid_iter_factory: AbsIterFactory,\n trainer_options,\n distributed_option: DistributedOption,\n ) -> None:\n \"\"\"Perform training. 
This method performs the main process of training.\"\"\"\n assert check_argument_types()\n # NOTE(kamo): Don't check the type more strictly as far trainer_options\n assert is_dataclass(trainer_options), type(trainer_options)\n assert len(optimizers) == len(schedulers), (len(optimizers), len(schedulers))\n\n if isinstance(trainer_options.keep_nbest_models, int):\n keep_nbest_models = [trainer_options.keep_nbest_models]\n else:\n if len(trainer_options.keep_nbest_models) == 0:\n logging.warning(\"No keep_nbest_models is given. Change to [1]\")\n trainer_options.keep_nbest_models = [1]\n keep_nbest_models = trainer_options.keep_nbest_models\n\n output_dir = Path(trainer_options.output_dir)\n reporter = Reporter()\n if trainer_options.use_amp:\n if LooseVersion(torch.__version__) < LooseVersion(\"1.6.0\"):\n raise RuntimeError(\n \"Require torch>=1.6.0 for Automatic Mixed Precision\"\n )\n if trainer_options.sharded_ddp:\n if fairscale is None:\n raise RuntimeError(\n \"Requiring fairscale. Do 'pip install fairscale'\"\n )\n scaler = fairscale.optim.grad_scaler.ShardedGradScaler()\n else:\n scaler = GradScaler()\n else:\n scaler = None\n\n if trainer_options.resume:\n cls.resume(\n checkpoint=os.path.join(trainer_options.output_dir, \"checkpoint.pth\"),\n model=model,\n optimizers=optimizers,\n schedulers=schedulers,\n reporter=reporter,\n scaler=scaler,\n ngpu=trainer_options.ngpu,\n oss_bucket=trainer_options.oss_bucket,\n )\n\n start_epoch = reporter.get_epoch() + 1\n if start_epoch == trainer_options.max_epoch + 1:\n logging.warning(\n f\"The training has already reached at max_epoch: {start_epoch}\"\n )\n\n if distributed_option.distributed:\n if trainer_options.sharded_ddp:\n dp_model = fairscale.nn.data_parallel.ShardedDataParallel(\n module=model,\n sharded_optimizer=optimizers,\n )\n else:\n dp_model = torch.nn.parallel.DistributedDataParallel(\n model, find_unused_parameters=trainer_options.unused_parameters)\n elif distributed_option.ngpu > 1:\n dp_model = torch.nn.parallel.DataParallel(\n model,\n device_ids=list(range(distributed_option.ngpu)),\n )\n else:\n # NOTE(kamo): DataParallel also should work with ngpu=1,\n # but for debuggability it's better to keep this block.\n dp_model = model\n\n if trainer_options.use_tensorboard and (\n not distributed_option.distributed or distributed_option.dist_rank == 0\n ):\n from torch.utils.tensorboard import SummaryWriter\n if trainer_options.use_pai:\n train_summary_writer = SummaryWriter(\n os.path.join(trainer_options.output_dir, \"tensorboard/train\")\n )\n valid_summary_writer = SummaryWriter(\n os.path.join(trainer_options.output_dir, \"tensorboard/valid\")\n )\n else:\n train_summary_writer = SummaryWriter(\n str(output_dir / \"tensorboard\" / \"train\")\n )\n valid_summary_writer = SummaryWriter(\n str(output_dir / \"tensorboard\" / \"valid\")\n )\n else:\n train_summary_writer = None\n\n start_time = time.perf_counter()\n for iepoch in range(start_epoch, trainer_options.max_epoch + 1):\n if iepoch != start_epoch:\n logging.info(\n \"{}/{}epoch started. Estimated time to finish: {}\".format(\n iepoch,\n trainer_options.max_epoch,\n humanfriendly.format_timespan(\n (time.perf_counter() - start_time)\n / (iepoch - start_epoch)\n * (trainer_options.max_epoch - iepoch + 1)\n ),\n )\n )\n else:\n logging.info(f\"{iepoch}/{trainer_options.max_epoch}epoch started\")\n set_all_random_seed(trainer_options.seed + iepoch)\n\n reporter.set_epoch(iepoch)\n # 1. 
Train and validation for one-epoch\n with reporter.observe(\"train\") as sub_reporter:\n all_steps_are_invalid, max_update_stop = cls.train_one_epoch(\n model=dp_model,\n optimizers=optimizers,\n schedulers=schedulers,\n iterator=train_iter_factory.build_iter(iepoch),\n reporter=sub_reporter,\n scaler=scaler,\n summary_writer=train_summary_writer,\n options=trainer_options,\n distributed_option=distributed_option,\n par_reporter=reporter,\n )\n\n with reporter.observe(\"valid\") as sub_reporter:\n cls.validate_one_epoch(\n model=dp_model,\n iterator=valid_iter_factory.build_iter(iepoch),\n reporter=sub_reporter,\n options=trainer_options,\n distributed_option=distributed_option,\n )\n\n # 2. LR Scheduler step\n for scheduler in schedulers:\n if isinstance(scheduler, AbsValEpochStepScheduler):\n scheduler.step(\n reporter.get_value(*trainer_options.val_scheduler_criterion)\n )\n elif isinstance(scheduler, AbsEpochStepScheduler):\n scheduler.step()\n if trainer_options.sharded_ddp:\n for optimizer in optimizers:\n if isinstance(optimizer, fairscale.optim.oss.OSS):\n optimizer.consolidate_state_dict()\n\n if not distributed_option.distributed or distributed_option.dist_rank == 0:\n # 3. Report the results\n logging.info(reporter.log_message())\n if train_summary_writer is not None:\n reporter.tensorboard_add_scalar(train_summary_writer, key1=\"train\")\n reporter.tensorboard_add_scalar(valid_summary_writer, key1=\"valid\")\n if trainer_options.use_wandb:\n reporter.wandb_log()\n\n # save tensorboard on oss\n if trainer_options.use_pai and train_summary_writer is not None:\n def write_tensorboard_summary(summary_writer_path, oss_bucket):\n file_list = []\n for root, dirs, files in os.walk(summary_writer_path, topdown=False):\n for name in files:\n file_full_path = os.path.join(root, name)\n file_list.append(file_full_path)\n\n for file_full_path in file_list:\n with open(file_full_path, \"rb\") as f:\n oss_bucket.put_object(file_full_path, f)\n\n write_tensorboard_summary(os.path.join(trainer_options.output_dir, \"tensorboard/train\"), trainer_options.oss_bucket)\n write_tensorboard_summary(os.path.join(trainer_options.output_dir, \"tensorboard/valid\"), trainer_options.oss_bucket)\n\n\n # 4. Save/Update the checkpoint\n if trainer_options.use_pai:\n buffer = BytesIO()\n torch.save(\n {\n \"model\": model.state_dict(),\n \"reporter\": reporter.state_dict(),\n \"optimizers\": [o.state_dict() for o in optimizers],\n \"schedulers\": [\n s.state_dict() if s is not None else None\n for s in schedulers\n ],\n \"scaler\": scaler.state_dict() if scaler is not None else None,\n \"ema_model\": model.encoder.ema.model.state_dict()\n if hasattr(model.encoder, \"ema\") and model.encoder.ema is not None else None,\n },\n buffer,\n )\n trainer_options.oss_bucket.put_object(os.path.join(trainer_options.output_dir, \"checkpoint.pth\"), buffer.getvalue())\n else:\n torch.save(\n {\n \"model\": model.state_dict(),\n \"reporter\": reporter.state_dict(),\n \"optimizers\": [o.state_dict() for o in optimizers],\n \"schedulers\": [\n s.state_dict() if s is not None else None\n for s in schedulers\n ],\n \"scaler\": scaler.state_dict() if scaler is not None else None,\n },\n output_dir / \"checkpoint.pth\",\n )\n\n # 5. 
Save and log the model and update the link to the best model\n if trainer_options.use_pai:\n buffer = BytesIO()\n torch.save(model.state_dict(), buffer)\n trainer_options.oss_bucket.put_object(os.path.join(trainer_options.output_dir,\n f\"{iepoch}epoch.pth\"),buffer.getvalue())\n else:\n torch.save(model.state_dict(), output_dir / f\"{iepoch}epoch.pth\")\n\n # Creates a sym link latest.pth -> {iepoch}epoch.pth\n if trainer_options.use_pai:\n p = os.path.join(trainer_options.output_dir, \"latest.pth\")\n if trainer_options.oss_bucket.object_exists(p):\n trainer_options.oss_bucket.delete_object(p)\n trainer_options.oss_bucket.copy_object(trainer_options.oss_bucket.bucket_name,\n os.path.join(trainer_options.output_dir, f\"{iepoch}epoch.pth\"), p)\n else:\n p = output_dir / \"latest.pth\"\n if p.is_symlink() or p.exists():\n p.unlink()\n p.symlink_to(f\"{iepoch}epoch.pth\")\n\n _improved = []\n for _phase, k, _mode in trainer_options.best_model_criterion:\n # e.g. _phase, k, _mode = \"train\", \"loss\", \"min\"\n if reporter.has(_phase, k):\n best_epoch = reporter.get_best_epoch(_phase, k, _mode)\n # Creates sym links if it's the best result\n if best_epoch == iepoch:\n if trainer_options.use_pai:\n p = os.path.join(trainer_options.output_dir, f\"{_phase}.{k}.best.pth\")\n if trainer_options.oss_bucket.object_exists(p):\n trainer_options.oss_bucket.delete_object(p)\n trainer_options.oss_bucket.copy_object(trainer_options.oss_bucket.bucket_name,\n os.path.join(trainer_options.output_dir, f\"{iepoch}epoch.pth\"),p)\n else:\n p = output_dir / f\"{_phase}.{k}.best.pth\"\n if p.is_symlink() or p.exists():\n p.unlink()\n p.symlink_to(f\"{iepoch}epoch.pth\")\n _improved.append(f\"{_phase}.{k}\")\n if len(_improved) == 0:\n logging.info(\"There are no improvements in this epoch\")\n else:\n logging.info(\n \"The best model has been updated: \" + \", \".join(_improved)\n )\n\n log_model = (\n trainer_options.wandb_model_log_interval > 0\n and iepoch % trainer_options.wandb_model_log_interval == 0\n )\n if log_model and trainer_options.use_wandb:\n import wandb\n\n logging.info(\"Logging Model on this epoch :::::\")\n artifact = wandb.Artifact(\n name=f\"model_{wandb.run.id}\",\n type=\"model\",\n metadata={\"improved\": _improved},\n )\n artifact.add_file(str(output_dir / f\"{iepoch}epoch.pth\"))\n aliases = [\n f\"epoch-{iepoch}\",\n \"best\" if best_epoch == iepoch else \"\",\n ]\n wandb.log_artifact(artifact, aliases=aliases)\n\n # 6. 
Remove the model files excluding n-best epoch and latest epoch\n _removed = []\n # Get the union set of the n-best among multiple criterion\n nbests = set().union(\n *[\n set(reporter.sort_epochs(ph, k, m)[: max(keep_nbest_models)])\n for ph, k, m in trainer_options.best_model_criterion\n if reporter.has(ph, k)\n ]\n )\n\n # Generated n-best averaged model\n if (\n trainer_options.nbest_averaging_interval > 0\n and iepoch % trainer_options.nbest_averaging_interval == 0\n ):\n average_nbest_models(\n reporter=reporter,\n output_dir=output_dir,\n best_model_criterion=trainer_options.best_model_criterion,\n nbest=keep_nbest_models,\n suffix=f\"till{iepoch}epoch\",\n oss_bucket=trainer_options.oss_bucket,\n pai_output_dir=trainer_options.output_dir,\n )\n\n for e in range(1, iepoch):\n if trainer_options.use_pai:\n p = os.path.join(trainer_options.output_dir, f\"{e}epoch.pth\")\n if trainer_options.oss_bucket.object_exists(p) and e not in nbests:\n trainer_options.oss_bucket.delete_object(p)\n _removed.append(str(p))\n else:\n p = output_dir / f\"{e}epoch.pth\"\n if p.exists() and e not in nbests:\n p.unlink()\n _removed.append(str(p))\n if len(_removed) != 0:\n logging.info(\"The model files were removed: \" + \", \".join(_removed))\n\n # 7. If any updating haven't happened, stops the training\n if all_steps_are_invalid:\n logging.warning(\n f\"The gradients at all steps are invalid in this epoch. \"\n f\"Something seems wrong. This training was stopped at {iepoch}epoch\"\n )\n break\n\n if max_update_stop:\n logging.info(\n f\"Stopping training due to \"\n f\"num_updates: {trainer_options.num_updates} >= max_update: {trainer_options.max_update}\"\n )\n break\n\n # 8. Check early stopping\n if trainer_options.patience is not None:\n if reporter.check_early_stopping(\n trainer_options.patience, *trainer_options.early_stopping_criterion\n ):\n break\n\n gc.collect()\n\n else:\n logging.info(\n f\"The training was finished at {trainer_options.max_epoch} epochs \"\n )\n\n # Generated n-best averaged model\n if not distributed_option.distributed or distributed_option.dist_rank == 0:\n average_nbest_models(\n reporter=reporter,\n output_dir=output_dir,\n best_model_criterion=trainer_options.best_model_criterion,\n nbest=keep_nbest_models,\n oss_bucket=trainer_options.oss_bucket,\n pai_output_dir=trainer_options.output_dir,\n )\n\n @classmethod\n def train_one_epoch(\n cls,\n model: torch.nn.Module,\n iterator: Iterable[Tuple[List[str], Dict[str, torch.Tensor]]],\n optimizers: Sequence[torch.optim.Optimizer],\n schedulers: Sequence[Optional[AbsScheduler]],\n scaler: Optional[GradScaler],\n reporter: SubReporter,\n summary_writer,\n options: TrainerOptions,\n distributed_option: DistributedOption,\n **kwargs\n ) -> Tuple[bool, bool]:\n assert check_argument_types()\n\n grad_noise = options.grad_noise\n accum_grad = options.accum_grad\n grad_clip = options.grad_clip\n grad_clip_type = options.grad_clip_type\n log_interval = options.log_interval\n no_forward_run = options.no_forward_run\n ngpu = options.ngpu\n use_wandb = options.use_wandb\n distributed = distributed_option.distributed\n\n if log_interval is None:\n try:\n log_interval = max(len(iterator) // 20, 10)\n except TypeError:\n log_interval = 100\n\n model.train()\n all_steps_are_invalid = True\n max_update_stop = False\n # [For distributed] Because iteration counts are not always equals between\n # processes, send stop-flag to the other processes if iterator is finished\n iterator_stop = torch.tensor(0).to(\"cuda\" if ngpu > 0 else 
\"cpu\")\n\n start_time = time.perf_counter()\n for iiter, (_, batch) in enumerate(\n reporter.measure_iter_time(iterator, \"iter_time\"), 1\n ):\n assert isinstance(batch, dict), type(batch)\n\n if distributed:\n torch.distributed.all_reduce(iterator_stop, ReduceOp.SUM)\n if iterator_stop > 0:\n break\n\n batch = to_device(batch, \"cuda\" if ngpu > 0 else \"cpu\")\n if no_forward_run:\n all_steps_are_invalid = False\n continue\n\n with autocast(scaler is not None):\n with reporter.measure_time(\"forward_time\"):\n retval = model(**batch)\n\n # Note(kamo):\n # Supporting two patterns for the returned value from the model\n # a. dict type\n if isinstance(retval, dict):\n loss = retval[\"loss\"]\n stats = retval[\"stats\"]\n weight = retval[\"weight\"]\n optim_idx = retval.get(\"optim_idx\")\n if optim_idx is not None and not isinstance(optim_idx, int):\n if not isinstance(optim_idx, torch.Tensor):\n raise RuntimeError(\n \"optim_idx must be int or 1dim torch.Tensor, \"\n f\"but got {type(optim_idx)}\"\n )\n if optim_idx.dim() >= 2:\n raise RuntimeError(\n \"optim_idx must be int or 1dim torch.Tensor, \"\n f\"but got {optim_idx.dim()}dim tensor\"\n )\n if optim_idx.dim() == 1:\n for v in optim_idx:\n if v != optim_idx[0]:\n raise RuntimeError(\n \"optim_idx must be 1dim tensor \"\n \"having same values for all entries\"\n )\n optim_idx = optim_idx[0].item()\n else:\n optim_idx = optim_idx.item()\n\n # b. tuple or list type\n else:\n loss, stats, weight = retval\n optim_idx = None\n\n stats = {k: v for k, v in stats.items() if v is not None}\n if ngpu > 1 or distributed:\n # Apply weighted averaging for loss and stats\n loss = (loss * weight.type(loss.dtype)).sum()\n\n # if distributed, this method can also apply all_reduce()\n stats, weight = recursive_average(stats, weight, distributed)\n\n # Now weight is summation over all workers\n loss /= weight\n if distributed:\n # NOTE(kamo): Multiply world_size because DistributedDataParallel\n # automatically normalizes the gradient by world_size.\n loss *= torch.distributed.get_world_size()\n\n loss /= accum_grad\n\n reporter.register(stats, weight)\n\n with reporter.measure_time(\"backward_time\"):\n if scaler is not None:\n # Scales loss. Calls backward() on scaled loss\n # to create scaled gradients.\n # Backward passes under autocast are not recommended.\n # Backward ops run in the same dtype autocast chose\n # for corresponding forward ops.\n scaler.scale(loss).backward()\n else:\n loss.backward()\n\n if iiter % accum_grad == 0:\n if scaler is not None:\n # Unscales the gradients of optimizer's assigned params in-place\n for iopt, optimizer in enumerate(optimizers):\n if optim_idx is not None and iopt != optim_idx:\n continue\n scaler.unscale_(optimizer)\n\n # gradient noise injection\n if grad_noise:\n add_gradient_noise(\n model,\n reporter.get_total_count(),\n duration=100,\n eta=1.0,\n scale_factor=0.55,\n )\n\n # compute the gradient norm to check if it is normal or not\n grad_norm = torch.nn.utils.clip_grad_norm_(\n model.parameters(),\n max_norm=grad_clip,\n norm_type=grad_clip_type,\n )\n # PyTorch<=1.4, clip_grad_norm_ returns float value\n if not isinstance(grad_norm, torch.Tensor):\n grad_norm = torch.tensor(grad_norm)\n\n if not torch.isfinite(grad_norm):\n logging.warning(\n f\"The grad norm is {grad_norm}. 
Skipping updating the model.\"\n )\n\n # Must invoke scaler.update() if unscale_() is used in the iteration\n # to avoid the following error:\n # RuntimeError: unscale_() has already been called\n # on this optimizer since the last update().\n # Note that if the gradient has inf/nan values,\n # scaler.step skips optimizer.step().\n if scaler is not None:\n for iopt, optimizer in enumerate(optimizers):\n if optim_idx is not None and iopt != optim_idx:\n continue\n scaler.step(optimizer)\n scaler.update()\n\n else:\n all_steps_are_invalid = False\n with reporter.measure_time(\"optim_step_time\"):\n for iopt, (optimizer, scheduler) in enumerate(\n zip(optimizers, schedulers)\n ):\n if optim_idx is not None and iopt != optim_idx:\n continue\n if scaler is not None:\n # scaler.step() first unscales the gradients of\n # the optimizer's assigned params.\n scaler.step(optimizer)\n # Updates the scale for next iteration.\n scaler.update()\n else:\n optimizer.step()\n if isinstance(scheduler, AbsBatchStepScheduler):\n scheduler.step()\n for iopt, optimizer in enumerate(optimizers):\n if optim_idx is not None and iopt != optim_idx:\n continue\n optimizer.zero_grad()\n\n # Register lr and train/load time[sec/step],\n # where step refers to accum_grad * mini-batch\n reporter.register(\n dict(\n {\n f\"optim{i}_lr{j}\": pg[\"lr\"]\n for i, optimizer in enumerate(optimizers)\n for j, pg in enumerate(optimizer.param_groups)\n if \"lr\" in pg\n },\n train_time=time.perf_counter() - start_time,\n ),\n )\n start_time = time.perf_counter()\n\n # update num_updates\n if distributed:\n if hasattr(model.module, \"num_updates\"):\n model.module.set_num_updates(model.module.get_num_updates() + 1)\n options.num_updates = model.module.get_num_updates()\n if model.module.get_num_updates() >= options.max_update:\n max_update_stop = True\n else:\n if hasattr(model, \"num_updates\"):\n model.set_num_updates(model.get_num_updates() + 1)\n options.num_updates = model.get_num_updates()\n if model.get_num_updates() >= options.max_update:\n max_update_stop = True\n\n to_save_model = model.module if distributed else model\n if (hasattr(options, \"num_updates\") and\n options.save_ckpt_every_steps > 0 and\n options.num_updates % options.save_ckpt_every_steps == 0):\n if options.use_pai:\n buffer = BytesIO()\n torch.save(\n {\n \"model\": to_save_model.state_dict(),\n \"optimizers\": [o.state_dict() for o in optimizers],\n \"reporter\": kwargs[\"par_reporter\"].state_dict(),\n \"schedulers\": [\n s.state_dict() if s is not None else None\n for s in schedulers\n ],\n \"scaler\": scaler.state_dict() if scaler is not None else None,\n \"ema_model\": to_save_model.encoder.ema.model.state_dict()\n if hasattr(to_save_model.encoder, \"ema\") and to_save_model.encoder.ema is not None else None,\n },\n buffer,\n )\n options.oss_bucket.put_object(\n os.path.join(options.output_dir, f\"checkpoint-{options.num_updates}.pth\"), buffer.getvalue())\n else:\n torch.save(\n {\n \"model\": to_save_model.state_dict(),\n \"optimizers\": [o.state_dict() for o in optimizers],\n \"reporter\": kwargs[\"par_reporter\"].state_dict(),\n \"schedulers\": [\n s.state_dict() if s is not None else None\n for s in schedulers\n ],\n \"scaler\": scaler.state_dict() if scaler is not None else None,\n },\n options.output_dir / f\"checkpoint-{options.num_updates}.pth\",\n )\n\n # NOTE(kamo): Call log_message() after next()\n reporter.next()\n if iiter % log_interval == 0:\n num_updates = options.num_updates if hasattr(options, \"num_updates\") else None\n 
logging.info(reporter.log_message(-log_interval, num_updates=num_updates))\n if summary_writer is not None:\n reporter.tensorboard_add_scalar(summary_writer, -log_interval)\n if use_wandb:\n reporter.wandb_log()\n\n if max_update_stop:\n break\n\n else:\n if distributed:\n iterator_stop.fill_(1)\n torch.distributed.all_reduce(iterator_stop, ReduceOp.SUM)\n return all_steps_are_invalid, max_update_stop\n\n @classmethod\n @torch.no_grad()\n def validate_one_epoch(\n cls,\n model: torch.nn.Module,\n iterator: Iterable[Dict[str, torch.Tensor]],\n reporter: SubReporter,\n options: TrainerOptions,\n distributed_option: DistributedOption,\n ) -> None:\n assert check_argument_types()\n ngpu = options.ngpu\n no_forward_run = options.no_forward_run\n distributed = distributed_option.distributed\n\n model.eval()\n\n # [For distributed] Because iteration counts are not always equals between\n # processes, send stop-flag to the other processes if iterator is finished\n iterator_stop = torch.tensor(0).to(\"cuda\" if ngpu > 0 else \"cpu\")\n for (_, batch) in iterator:\n assert isinstance(batch, dict), type(batch)\n if distributed:\n torch.distributed.all_reduce(iterator_stop, ReduceOp.SUM)\n if iterator_stop > 0:\n break\n\n batch = to_device(batch, \"cuda\" if ngpu > 0 else \"cpu\")\n if no_forward_run:\n continue\n\n retval = model(**batch)\n if isinstance(retval, dict):\n stats = retval[\"stats\"]\n weight = retval[\"weight\"]\n else:\n _, stats, weight = retval\n if ngpu > 1 or distributed:\n # Apply weighted averaging for stats.\n # if distributed, this method can also apply all_reduce()\n stats, weight = recursive_average(stats, weight, distributed)\n\n reporter.register(stats, weight)\n reporter.next()\n\n else:\n if distributed:\n iterator_stop.fill_(1)\n torch.distributed.all_reduce(iterator_stop, ReduceOp.SUM)" }, { "identifier": "TrainerOptions", "path": "funcodec/train/trainer.py", "snippet": "class TrainerOptions:\n ngpu: int\n resume: bool\n use_amp: bool\n train_dtype: str\n grad_noise: bool\n accum_grad: int\n grad_clip: float\n grad_clip_type: float\n log_interval: Optional[int]\n no_forward_run: bool\n use_tensorboard: bool\n use_wandb: bool\n output_dir: Union[Path, str]\n max_epoch: int\n max_update: int\n seed: int\n sharded_ddp: bool\n patience: Optional[int]\n keep_nbest_models: Union[int, List[int]]\n nbest_averaging_interval: int\n early_stopping_criterion: Sequence[str]\n best_model_criterion: Sequence[Sequence[str]]\n val_scheduler_criterion: Sequence[str]\n unused_parameters: bool\n wandb_model_log_interval: int\n use_pai: bool\n oss_bucket: Any\n save_ckpt_every_steps: int" }, { "identifier": "build_dataclass", "path": "funcodec/utils/build_dataclass.py", "snippet": "def build_dataclass(dataclass, args: argparse.Namespace):\n \"\"\"Helper function to build dataclass from 'args'.\"\"\"\n kwargs = {}\n for field in dataclasses.fields(dataclass):\n if not hasattr(args, field.name):\n raise ValueError(\n f\"args doesn't have {field.name}. You need to set it to ArgumentsParser\"\n )\n check_type(field.name, getattr(args, field.name), field.type)\n kwargs[field.name] = getattr(args, field.name)\n return dataclass(**kwargs)" }, { "identifier": "str2bool", "path": "funcodec/utils/types.py", "snippet": "def str2bool(value: str) -> bool:\n return bool(strtobool(value))" } ]
import argparse import dataclasses import logging import time import numpy as np import torch import os import soundfile import gc import fairscale from contextlib import contextmanager from distutils.version import LooseVersion from typing import Dict from typing import Iterable from typing import List from typing import Optional from typing import Sequence from typing import Tuple from io import BytesIO from typeguard import check_argument_types from funcodec.schedulers.abs_scheduler import AbsBatchStepScheduler from funcodec.schedulers.abs_scheduler import AbsScheduler from funcodec.torch_utils.device_funcs import to_device from funcodec.torch_utils.recursive_op import recursive_average from funcodec.train.distributed_utils import DistributedOption from funcodec.train.reporter import SubReporter from funcodec.train.trainer import Trainer from funcodec.train.trainer import TrainerOptions from funcodec.utils.build_dataclass import build_dataclass from funcodec.utils.types import str2bool from torch.distributed import ReduceOp from torch.cuda.amp import autocast from torch.cuda.amp import GradScaler
11,288
# Copyright 2021 Tomoki Hayashi # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) # Adapted by Zhihao Du for GAN-based Codec models. """Trainer module for GAN-based training.""" if torch.distributed.is_available(): if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"): else: # Nothing to do if torch<1.6.0 @contextmanager def autocast(enabled=True): # NOQA yield GradScaler = None try: except ImportError: fairscale = None @dataclasses.dataclass class GANTrainerOptions(TrainerOptions): """Trainer option dataclass for GANTrainer.""" generator_first: bool disc_grad_clip: float disc_grad_clip_type: float gen_train_interval: int disc_train_interval: int sampling_rate: int class GANTrainer(Trainer): """Trainer for GAN-based training. If you'd like to use this trainer, the model must inherit espnet.train.abs_gan_espnet_model.AbsGANESPnetModel. """ @classmethod def build_options(cls, args: argparse.Namespace) -> TrainerOptions: """Build options consumed by train(), eval(), and plot_attention().""" assert check_argument_types() return build_dataclass(GANTrainerOptions, args) @classmethod def add_arguments(cls, parser: argparse.ArgumentParser): """Add additional arguments for GAN-trainer.""" parser.add_argument( "--generator_first",
# Copyright 2021 Tomoki Hayashi # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) # Adapted by Zhihao Du for GAN-based Codec models. """Trainer module for GAN-based training.""" if torch.distributed.is_available(): if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"): else: # Nothing to do if torch<1.6.0 @contextmanager def autocast(enabled=True): # NOQA yield GradScaler = None try: except ImportError: fairscale = None @dataclasses.dataclass class GANTrainerOptions(TrainerOptions): """Trainer option dataclass for GANTrainer.""" generator_first: bool disc_grad_clip: float disc_grad_clip_type: float gen_train_interval: int disc_train_interval: int sampling_rate: int class GANTrainer(Trainer): """Trainer for GAN-based training. If you'd like to use this trainer, the model must inherit espnet.train.abs_gan_espnet_model.AbsGANESPnetModel. """ @classmethod def build_options(cls, args: argparse.Namespace) -> TrainerOptions: """Build options consumed by train(), eval(), and plot_attention().""" assert check_argument_types() return build_dataclass(GANTrainerOptions, args) @classmethod def add_arguments(cls, parser: argparse.ArgumentParser): """Add additional arguments for GAN-trainer.""" parser.add_argument( "--generator_first",
type=str2bool,
9
2023-10-07 02:00:40+00:00
16k
longzw1997/Open-GroundingDino
models/GroundingDINO/groundingdino.py
[ { "identifier": "box_ops", "path": "groundingdino/util/box_ops.py", "snippet": "def box_cxcywh_to_xyxy(x):\ndef box_xyxy_to_cxcywh(x):\ndef box_iou(boxes1, boxes2):\ndef generalized_box_iou(boxes1, boxes2):\ndef box_iou_pairwise(boxes1, boxes2):\ndef generalized_box_iou_pairwise(boxes1, boxes2):\ndef masks_to_boxes(masks):" }, { "identifier": "get_tokenlizer", "path": "groundingdino/util/get_tokenlizer.py", "snippet": "def get_tokenlizer(text_encoder_type):\n if not isinstance(text_encoder_type, str):\n # print(\"text_encoder_type is not a str\")\n if hasattr(text_encoder_type, \"text_encoder_type\"):\n text_encoder_type = text_encoder_type.text_encoder_type\n elif text_encoder_type.get(\"text_encoder_type\", False):\n text_encoder_type = text_encoder_type.get(\"text_encoder_type\")\n elif os.path.isdir(text_encoder_type) and os.path.exists(text_encoder_type):\n pass\n else:\n raise ValueError(\n \"Unknown type of text_encoder_type: {}\".format(type(text_encoder_type))\n )\n print(\"final text_encoder_type: {}\".format(text_encoder_type))\n tokenizer = AutoTokenizer.from_pretrained(text_encoder_type)\n print(\"load tokenizer done.\")\n return tokenizer" }, { "identifier": "NestedTensor", "path": "groundingdino/util/misc.py", "snippet": "class NestedTensor(object):\n def __init__(self, tensors, mask: Optional[Tensor]):\n self.tensors = tensors\n self.mask = mask\n if mask == \"auto\":\n self.mask = torch.zeros_like(tensors).to(tensors.device)\n if self.mask.dim() == 3:\n self.mask = self.mask.sum(0).to(bool)\n elif self.mask.dim() == 4:\n self.mask = self.mask.sum(1).to(bool)\n else:\n raise ValueError(\n \"tensors dim must be 3 or 4 but {}({})\".format(\n self.tensors.dim(), self.tensors.shape\n )\n )\n\n def imgsize(self):\n res = []\n for i in range(self.tensors.shape[0]):\n mask = self.mask[i]\n maxH = (~mask).sum(0).max()\n maxW = (~mask).sum(1).max()\n res.append(torch.Tensor([maxH, maxW]))\n return res\n\n def to(self, device):\n # type: (Device) -> NestedTensor # noqa\n cast_tensor = self.tensors.to(device)\n mask = self.mask\n if mask is not None:\n assert mask is not None\n cast_mask = mask.to(device)\n else:\n cast_mask = None\n return NestedTensor(cast_tensor, cast_mask)\n\n def to_img_list_single(self, tensor, mask):\n assert tensor.dim() == 3, \"dim of tensor should be 3 but {}\".format(tensor.dim())\n maxH = (~mask).sum(0).max()\n maxW = (~mask).sum(1).max()\n img = tensor[:, :maxH, :maxW]\n return img\n\n def to_img_list(self):\n \"\"\"remove the padding and convert to img list\n\n Returns:\n [type]: [description]\n \"\"\"\n if self.tensors.dim() == 3:\n return self.to_img_list_single(self.tensors, self.mask)\n else:\n res = []\n for i in range(self.tensors.shape[0]):\n tensor_i = self.tensors[i]\n mask_i = self.mask[i]\n res.append(self.to_img_list_single(tensor_i, mask_i))\n return res\n\n @property\n def device(self):\n return self.tensors.device\n\n def decompose(self):\n return self.tensors, self.mask\n\n def __repr__(self):\n return str(self.tensors)\n\n @property\n def shape(self):\n return {\"tensors.shape\": self.tensors.shape, \"mask.shape\": self.mask.shape}" }, { "identifier": "accuracy", "path": "groundingdino/util/misc.py", "snippet": "@torch.no_grad()\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n if target.numel() == 0:\n return [torch.zeros([], device=output.device)]\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = 
pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res" }, { "identifier": "get_world_size", "path": "groundingdino/util/misc.py", "snippet": "def get_world_size():\n if not is_dist_avail_and_initialized():\n return 1\n return dist.get_world_size()" }, { "identifier": "interpolate", "path": "groundingdino/util/misc.py", "snippet": "def interpolate(input, size=None, scale_factor=None, mode=\"nearest\", align_corners=None):\n # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor\n \"\"\"\n Equivalent to nn.functional.interpolate, but with support for empty batch sizes.\n This will eventually be supported natively by PyTorch, and this\n class can go away.\n \"\"\"\n if __torchvision_need_compat_flag < 0.7:\n if input.numel() > 0:\n return torch.nn.functional.interpolate(input, size, scale_factor, mode, align_corners)\n\n output_shape = _output_size(2, input, size, scale_factor)\n output_shape = list(input.shape[:-2]) + list(output_shape)\n return _new_empty_tensor(input, output_shape)\n else:\n return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)" }, { "identifier": "inverse_sigmoid", "path": "groundingdino/util/misc.py", "snippet": "def inverse_sigmoid(x, eps=1e-3):\n x = x.clamp(min=0, max=1)\n x1 = x.clamp(min=eps)\n x2 = (1 - x).clamp(min=eps)\n return torch.log(x1 / x2)" }, { "identifier": "is_dist_avail_and_initialized", "path": "groundingdino/util/misc.py", "snippet": "def is_dist_avail_and_initialized():\n if not dist.is_available():\n return False\n if not dist.is_initialized():\n return False\n return True" }, { "identifier": "nested_tensor_from_tensor_list", "path": "groundingdino/util/misc.py", "snippet": "def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):\n # TODO make this more general\n if tensor_list[0].ndim == 3:\n if torchvision._is_tracing():\n # nested_tensor_from_tensor_list() does not export well to ONNX\n # call _onnx_nested_tensor_from_tensor_list() instead\n return _onnx_nested_tensor_from_tensor_list(tensor_list)\n\n # TODO make it support different-sized images\n max_size = _max_by_axis([list(img.shape) for img in tensor_list])\n # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))\n batch_shape = [len(tensor_list)] + max_size\n b, c, h, w = batch_shape\n dtype = tensor_list[0].dtype\n device = tensor_list[0].device\n tensor = torch.zeros(batch_shape, dtype=dtype, device=device)\n mask = torch.ones((b, h, w), dtype=torch.bool, device=device)\n for img, pad_img, m in zip(tensor_list, tensor, mask):\n pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)\n m[: img.shape[1], : img.shape[2]] = False\n else:\n raise ValueError(\"not supported\")\n return NestedTensor(tensor, mask)" }, { "identifier": "get_phrases_from_posmap", "path": "groundingdino/util/utils.py", "snippet": "def get_phrases_from_posmap(\n posmap: torch.BoolTensor, tokenized: Dict, tokenizer: AutoTokenizer, left_idx: int = 0, right_idx: int = 255\n):\n assert isinstance(posmap, torch.Tensor), \"posmap must be torch.Tensor\"\n if posmap.dim() == 1:\n posmap[0: left_idx + 1] = False\n posmap[right_idx:] = False\n non_zero_idx = posmap.nonzero(as_tuple=True)[0].tolist()\n token_ids = [tokenized[\"input_ids\"][i] for i in non_zero_idx]\n return tokenizer.decode(token_ids)\n else:\n raise NotImplementedError(\"posmap must be 1-dim\")" }, { "identifier": 
"COCOVisualizer", "path": "groundingdino/util/visualizer.py", "snippet": "class COCOVisualizer:\n def __init__(self, coco=None, tokenlizer=None) -> None:\n self.coco = coco\n\n def visualize(self, img, tgt, caption=None, dpi=180, savedir=\"vis\"):\n \"\"\"\n img: tensor(3, H, W)\n tgt: make sure they are all on cpu.\n must have items: 'image_id', 'boxes', 'size'\n \"\"\"\n plt.figure(dpi=dpi)\n plt.rcParams[\"font.size\"] = \"5\"\n ax = plt.gca()\n img = renorm(img).permute(1, 2, 0)\n # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO':\n # import ipdb; ipdb.set_trace()\n ax.imshow(img)\n\n self.addtgt(tgt)\n\n if tgt is None:\n image_id = 0\n elif \"image_id\" not in tgt:\n image_id = 0\n else:\n image_id = tgt[\"image_id\"]\n\n if caption is None:\n savename = \"{}/{}-{}.png\".format(\n savedir, int(image_id), str(datetime.datetime.now()).replace(\" \", \"-\")\n )\n else:\n savename = \"{}/{}-{}-{}.png\".format(\n savedir, caption, int(image_id), str(datetime.datetime.now()).replace(\" \", \"-\")\n )\n print(\"savename: {}\".format(savename))\n os.makedirs(os.path.dirname(savename), exist_ok=True)\n plt.savefig(savename)\n plt.close()\n\n def addtgt(self, tgt):\n \"\"\" \"\"\"\n if tgt is None or not \"boxes\" in tgt:\n ax = plt.gca()\n\n if \"caption\" in tgt:\n ax.set_title(tgt[\"caption\"], wrap=True)\n\n ax.set_axis_off()\n return\n\n ax = plt.gca()\n H, W = tgt[\"size\"]\n numbox = tgt[\"boxes\"].shape[0]\n\n color = []\n polygons = []\n boxes = []\n for box in tgt[\"boxes\"].cpu():\n unnormbbox = box * torch.Tensor([W, H, W, H])\n unnormbbox[:2] -= unnormbbox[2:] / 2\n [bbox_x, bbox_y, bbox_w, bbox_h] = unnormbbox.tolist()\n boxes.append([bbox_x, bbox_y, bbox_w, bbox_h])\n poly = [\n [bbox_x, bbox_y],\n [bbox_x, bbox_y + bbox_h],\n [bbox_x + bbox_w, bbox_y + bbox_h],\n [bbox_x + bbox_w, bbox_y],\n ]\n np_poly = np.array(poly).reshape((4, 2))\n polygons.append(Polygon(np_poly))\n c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]\n color.append(c)\n\n p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.1)\n ax.add_collection(p)\n p = PatchCollection(polygons, facecolor=\"none\", edgecolors=color, linewidths=2)\n ax.add_collection(p)\n\n if \"strings_positive\" in tgt and len(tgt[\"strings_positive\"]) > 0:\n assert (\n len(tgt[\"strings_positive\"]) == numbox\n ), f\"{len(tgt['strings_positive'])} = {numbox}, \"\n for idx, strlist in enumerate(tgt[\"strings_positive\"]):\n cate_id = int(tgt[\"labels\"][idx])\n _string = str(cate_id) + \":\" + \" \".join(strlist)\n bbox_x, bbox_y, bbox_w, bbox_h = boxes[idx]\n # ax.text(bbox_x, bbox_y, _string, color='black', bbox={'facecolor': 'yellow', 'alpha': 1.0, 'pad': 1})\n ax.text(\n bbox_x,\n bbox_y,\n _string,\n color=\"black\",\n bbox={\"facecolor\": color[idx], \"alpha\": 0.6, \"pad\": 1},\n )\n\n if \"box_label\" in tgt:\n assert len(tgt[\"box_label\"]) == numbox, f\"{len(tgt['box_label'])} = {numbox}, \"\n for idx, bl in enumerate(tgt[\"box_label\"]):\n _string = str(bl)\n bbox_x, bbox_y, bbox_w, bbox_h = boxes[idx]\n # ax.text(bbox_x, bbox_y, _string, color='black', bbox={'facecolor': 'yellow', 'alpha': 1.0, 'pad': 1})\n ax.text(\n bbox_x,\n bbox_y,\n _string,\n color=\"black\",\n bbox={\"facecolor\": color[idx], \"alpha\": 0.6, \"pad\": 1},\n )\n\n if \"caption\" in tgt:\n ax.set_title(tgt[\"caption\"], wrap=True)\n # plt.figure()\n # rainbow_text(0.0,0.0,\"all unicorns poop rainbows ! ! 
!\".split(),\n # ['red', 'orange', 'brown', 'green', 'blue', 'purple', 'black'])\n\n if \"attn\" in tgt:\n # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO':\n # import ipdb; ipdb.set_trace()\n if isinstance(tgt[\"attn\"], tuple):\n tgt[\"attn\"] = [tgt[\"attn\"]]\n for item in tgt[\"attn\"]:\n attn_map, basergb = item\n attn_map = (attn_map - attn_map.min()) / (attn_map.max() - attn_map.min() + 1e-3)\n attn_map = (attn_map * 255).astype(np.uint8)\n cm = ColorMap(basergb)\n heatmap = cm(attn_map)\n ax.imshow(heatmap)\n ax.set_axis_off()\n\n def showAnns(self, anns, draw_bbox=False):\n \"\"\"\n Display the specified annotations.\n :param anns (array of object): annotations to display\n :return: None\n \"\"\"\n if len(anns) == 0:\n return 0\n if \"segmentation\" in anns[0] or \"keypoints\" in anns[0]:\n datasetType = \"instances\"\n elif \"caption\" in anns[0]:\n datasetType = \"captions\"\n else:\n raise Exception(\"datasetType not supported\")\n if datasetType == \"instances\":\n ax = plt.gca()\n ax.set_autoscale_on(False)\n polygons = []\n color = []\n for ann in anns:\n c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]\n if \"segmentation\" in ann:\n if type(ann[\"segmentation\"]) == list:\n # polygon\n for seg in ann[\"segmentation\"]:\n poly = np.array(seg).reshape((int(len(seg) / 2), 2))\n polygons.append(Polygon(poly))\n color.append(c)\n else:\n # mask\n t = self.imgs[ann[\"image_id\"]]\n if type(ann[\"segmentation\"][\"counts\"]) == list:\n rle = maskUtils.frPyObjects(\n [ann[\"segmentation\"]], t[\"height\"], t[\"width\"]\n )\n else:\n rle = [ann[\"segmentation\"]]\n m = maskUtils.decode(rle)\n img = np.ones((m.shape[0], m.shape[1], 3))\n if ann[\"iscrowd\"] == 1:\n color_mask = np.array([2.0, 166.0, 101.0]) / 255\n if ann[\"iscrowd\"] == 0:\n color_mask = np.random.random((1, 3)).tolist()[0]\n for i in range(3):\n img[:, :, i] = color_mask[i]\n ax.imshow(np.dstack((img, m * 0.5)))\n if \"keypoints\" in ann and type(ann[\"keypoints\"]) == list:\n # turn skeleton into zero-based index\n sks = np.array(self.loadCats(ann[\"category_id\"])[0][\"skeleton\"]) - 1\n kp = np.array(ann[\"keypoints\"])\n x = kp[0::3]\n y = kp[1::3]\n v = kp[2::3]\n for sk in sks:\n if np.all(v[sk] > 0):\n plt.plot(x[sk], y[sk], linewidth=3, color=c)\n plt.plot(\n x[v > 0],\n y[v > 0],\n \"o\",\n markersize=8,\n markerfacecolor=c,\n markeredgecolor=\"k\",\n markeredgewidth=2,\n )\n plt.plot(\n x[v > 1],\n y[v > 1],\n \"o\",\n markersize=8,\n markerfacecolor=c,\n markeredgecolor=c,\n markeredgewidth=2,\n )\n\n if draw_bbox:\n [bbox_x, bbox_y, bbox_w, bbox_h] = ann[\"bbox\"]\n poly = [\n [bbox_x, bbox_y],\n [bbox_x, bbox_y + bbox_h],\n [bbox_x + bbox_w, bbox_y + bbox_h],\n [bbox_x + bbox_w, bbox_y],\n ]\n np_poly = np.array(poly).reshape((4, 2))\n polygons.append(Polygon(np_poly))\n color.append(c)\n\n # p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)\n # ax.add_collection(p)\n p = PatchCollection(polygons, facecolor=\"none\", edgecolors=color, linewidths=2)\n ax.add_collection(p)\n elif datasetType == \"captions\":\n for ann in anns:\n print(ann[\"caption\"])" }, { "identifier": "create_positive_map_from_span", "path": "groundingdino/util/vl_utils.py", "snippet": "def create_positive_map_from_span(tokenized, token_span, max_text_len=256):\n \"\"\"construct a map such that positive_map[i,j] = True iff box i is associated to token j\n Input:\n - tokenized:\n - input_ids: Tensor[1, ntokens]\n - attention_mask: Tensor[1, ntokens]\n - token_span: list with length num_boxes.\n - 
each item: [start_idx, end_idx]\n \"\"\"\n positive_map = torch.zeros((len(token_span), max_text_len), dtype=torch.float)\n for j, tok_list in enumerate(token_span):\n for (beg, end) in tok_list:\n beg_pos = tokenized.char_to_token(beg)\n end_pos = tokenized.char_to_token(end - 1)\n if beg_pos is None:\n try:\n beg_pos = tokenized.char_to_token(beg + 1)\n if beg_pos is None:\n beg_pos = tokenized.char_to_token(beg + 2)\n except:\n beg_pos = None\n if end_pos is None:\n try:\n end_pos = tokenized.char_to_token(end - 2)\n if end_pos is None:\n end_pos = tokenized.char_to_token(end - 3)\n except:\n end_pos = None\n if beg_pos is None or end_pos is None:\n continue\n\n assert beg_pos is not None and end_pos is not None\n if os.environ.get(\"SHILONG_DEBUG_ONLY_ONE_POS\", None) == \"TRUE\":\n positive_map[j, beg_pos] = 1\n break\n else:\n positive_map[j, beg_pos : end_pos + 1].fill_(1)\n\n return positive_map / (positive_map.sum(-1)[:, None] + 1e-6)" }, { "identifier": "MODULE_BUILD_FUNCS", "path": "models/registry.py", "snippet": "MODULE_BUILD_FUNCS = Registry('model build functions')" }, { "identifier": "build_backbone", "path": "models/GroundingDINO/backbone/backbone.py", "snippet": "def build_backbone(args):\n \"\"\"\n Useful args:\n - backbone: backbone name\n - lr_backbone:\n - dilation\n - return_interm_indices: available: [0,1,2,3], [1,2,3], [3]\n - backbone_freeze_keywords:\n - use_checkpoint: for swin only for now\n\n \"\"\"\n position_embedding = build_position_encoding(args)\n train_backbone = True\n if not train_backbone:\n raise ValueError(\"Please set lr_backbone > 0\")\n return_interm_indices = args.return_interm_indices\n assert return_interm_indices in [[0, 1, 2, 3], [1, 2, 3], [3]]\n args.backbone_freeze_keywords\n use_checkpoint = getattr(args, \"use_checkpoint\", False)\n\n if args.backbone in [\"resnet50\", \"resnet101\"]:\n backbone = Backbone(\n args.backbone,\n train_backbone,\n args.dilation,\n return_interm_indices,\n batch_norm=FrozenBatchNorm2d,\n )\n bb_num_channels = backbone.num_channels\n elif args.backbone in [\n \"swin_T_224_1k\",\n \"swin_B_224_22k\",\n \"swin_B_384_22k\",\n \"swin_L_224_22k\",\n \"swin_L_384_22k\",\n ]:\n pretrain_img_size = int(args.backbone.split(\"_\")[-2])\n backbone = build_swin_transformer(\n args.backbone,\n pretrain_img_size=pretrain_img_size,\n out_indices=tuple(return_interm_indices),\n dilation=False,\n use_checkpoint=use_checkpoint,\n )\n\n bb_num_channels = backbone.num_features[4 - len(return_interm_indices) :]\n else:\n raise NotImplementedError(\"Unknown backbone {}\".format(args.backbone))\n\n assert len(bb_num_channels) == len(\n return_interm_indices\n ), f\"len(bb_num_channels) {len(bb_num_channels)} != len(return_interm_indices) {len(return_interm_indices)}\"\n\n model = Joiner(backbone, position_embedding)\n model.num_channels = bb_num_channels\n assert isinstance(\n bb_num_channels, List\n ), \"bb_num_channels is expected to be a List but {}\".format(type(bb_num_channels))\n # import ipdb; ipdb.set_trace()\n return model" }, { "identifier": "BertModelWarper", "path": "models/GroundingDINO/bertwarper.py", "snippet": "class BertModelWarper(nn.Module):\n def __init__(self, bert_model):\n super().__init__()\n # self.bert = bert_modelc\n\n self.config = bert_model.config\n self.embeddings = bert_model.embeddings\n self.encoder = bert_model.encoder\n self.pooler = bert_model.pooler\n\n self.get_extended_attention_mask = bert_model.get_extended_attention_mask\n self.invert_attention_mask = bert_model.invert_attention_mask\n 
self.get_head_mask = bert_model.get_head_mask\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n \"\"\"\n output_attentions = (\n output_attentions if output_attentions is not None else self.config.output_attentions\n )\n output_hidden_states = (\n output_hidden_states\n if output_hidden_states is not None\n else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if self.config.is_decoder:\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n else:\n use_cache = False\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n batch_size, seq_length = input_shape\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n batch_size, seq_length = input_shape\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n # past_key_values_length\n past_key_values_length = (\n past_key_values[0][0].shape[2] if past_key_values is not None else 0\n )\n\n if attention_mask is None:\n attention_mask = torch.ones(\n ((batch_size, seq_length + past_key_values_length)), device=device\n )\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(\n attention_mask, input_shape, 
device\n )\n\n # If a 2D or 3D attention mask is provided for the cross-attention\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if self.config.is_decoder and encoder_hidden_states is not None:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = None\n # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO':\n # import ipdb; ipdb.set_trace()\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n embedding_output = self.embeddings(\n input_ids=input_ids,\n position_ids=position_ids,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n past_key_values_length=past_key_values_length,\n )\n\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(sequence_output) if self.pooler is not None else None\n\n if not return_dict:\n return (sequence_output, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPoolingAndCrossAttentions(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n past_key_values=encoder_outputs.past_key_values,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n cross_attentions=encoder_outputs.cross_attentions,\n )" }, { "identifier": "generate_masks_with_special_tokens", "path": "models/GroundingDINO/bertwarper.py", "snippet": "def generate_masks_with_special_tokens(tokenized, special_tokens_list, tokenizer):\n \"\"\"Generate attention mask between each pair of special tokens\n Args:\n input_ids (torch.Tensor): input ids. Shape: [bs, num_token]\n special_tokens_mask (list): special tokens mask.\n Returns:\n torch.Tensor: attention mask between each special tokens.\n \"\"\"\n input_ids = tokenized[\"input_ids\"]\n bs, num_token = input_ids.shape\n # special_tokens_mask: bs, num_token. 1 for special tokens. 
0 for normal tokens\n special_tokens_mask = torch.zeros((bs, num_token), device=input_ids.device).bool()\n for special_token in special_tokens_list:\n special_tokens_mask |= input_ids == special_token\n\n # idxs: each row is a list of indices of special tokens\n idxs = torch.nonzero(special_tokens_mask)\n\n # generate attention mask and positional ids\n attention_mask = (\n torch.eye(num_token, device=input_ids.device).bool().unsqueeze(0).repeat(bs, 1, 1)\n )\n position_ids = torch.zeros((bs, num_token), device=input_ids.device)\n previous_col = 0\n for i in range(idxs.shape[0]):\n row, col = idxs[i]\n if (col == 0) or (col == num_token - 1):\n attention_mask[row, col, col] = True\n position_ids[row, col] = 0\n else:\n attention_mask[row, previous_col + 1 : col + 1, previous_col + 1 : col + 1] = True\n position_ids[row, previous_col + 1 : col + 1] = torch.arange(\n 0, col - previous_col, device=input_ids.device\n )\n\n previous_col = col\n\n # # padding mask\n # padding_mask = tokenized['attention_mask']\n # attention_mask = attention_mask & padding_mask.unsqueeze(1).bool() & padding_mask.unsqueeze(2).bool()\n\n return attention_mask, position_ids.to(torch.long)" }, { "identifier": "generate_masks_with_special_tokens_and_transfer_map", "path": "models/GroundingDINO/bertwarper.py", "snippet": "def generate_masks_with_special_tokens_and_transfer_map(tokenized, special_tokens_list, tokenizer):\n \"\"\"Generate attention mask between each pair of special tokens\n Args:\n input_ids (torch.Tensor): input ids. Shape: [bs, num_token]\n special_tokens_mask (list): special tokens mask.\n Returns:\n torch.Tensor: attention mask between each special tokens.\n \"\"\"\n input_ids = tokenized[\"input_ids\"]\n bs, num_token = input_ids.shape\n # special_tokens_mask: bs, num_token. 1 for special tokens. 
0 for normal tokens\n special_tokens_mask = torch.zeros((bs, num_token), device=input_ids.device).bool()\n for special_token in special_tokens_list:\n special_tokens_mask |= input_ids == special_token\n\n # idxs: each row is a list of indices of special tokens\n idxs = torch.nonzero(special_tokens_mask)\n\n # generate attention mask and positional ids\n attention_mask = (\n torch.eye(num_token, device=input_ids.device).bool().unsqueeze(0).repeat(bs, 1, 1)\n )\n position_ids = torch.zeros((bs, num_token), device=input_ids.device)\n cate_to_token_mask_list = [[] for _ in range(bs)]\n previous_col = 0\n for i in range(idxs.shape[0]):\n row, col = idxs[i]\n if (col == 0) or (col == num_token - 1):\n attention_mask[row, col, col] = True\n position_ids[row, col] = 0\n else:\n attention_mask[row, previous_col + 1 : col + 1, previous_col + 1 : col + 1] = True\n position_ids[row, previous_col + 1 : col + 1] = torch.arange(\n 0, col - previous_col, device=input_ids.device\n )\n c2t_maski = torch.zeros((num_token), device=input_ids.device).bool()\n c2t_maski[previous_col + 1 : col] = True\n cate_to_token_mask_list[row].append(c2t_maski)\n previous_col = col\n\n cate_to_token_mask_list = [\n torch.stack(cate_to_token_mask_listi, dim=0)\n for cate_to_token_mask_listi in cate_to_token_mask_list\n ]\n\n # # padding mask\n # padding_mask = tokenized['attention_mask']\n # attention_mask = attention_mask & padding_mask.unsqueeze(1).bool() & padding_mask.unsqueeze(2).bool()\n\n return attention_mask, position_ids.to(torch.long), cate_to_token_mask_list" }, { "identifier": "build_transformer", "path": "models/GroundingDINO/transformer.py", "snippet": "def build_transformer(args):\n return Transformer(\n d_model=args.hidden_dim,\n dropout=args.dropout,\n nhead=args.nheads,\n num_queries=args.num_queries,\n dim_feedforward=args.dim_feedforward,\n num_encoder_layers=args.enc_layers,\n num_decoder_layers=args.dec_layers,\n normalize_before=args.pre_norm,\n return_intermediate_dec=True,\n query_dim=args.query_dim,\n activation=args.transformer_activation,\n num_patterns=args.num_patterns,\n num_feature_levels=args.num_feature_levels,\n enc_n_points=args.enc_n_points,\n dec_n_points=args.dec_n_points,\n learnable_tgt_init=True,\n # two stage\n two_stage_type=args.two_stage_type, # ['no', 'standard', 'early']\n embed_init_tgt=args.embed_init_tgt,\n use_text_enhancer=args.use_text_enhancer,\n use_fusion_layer=args.use_fusion_layer,\n use_checkpoint=args.use_checkpoint,\n use_transformer_ckpt=args.use_transformer_ckpt,\n use_text_cross_attention=args.use_text_cross_attention,\n text_dropout=args.text_dropout,\n fusion_dropout=args.fusion_dropout,\n fusion_droppath=args.fusion_droppath,\n )" }, { "identifier": "MLP", "path": "models/GroundingDINO/utils.py", "snippet": "class MLP(nn.Module):\n \"\"\"Very simple multi-layer perceptron (also called FFN)\"\"\"\n\n def __init__(self, input_dim, hidden_dim, output_dim, num_layers):\n super().__init__()\n self.num_layers = num_layers\n h = [hidden_dim] * (num_layers - 1)\n self.layers = nn.ModuleList(\n nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])\n )\n\n def forward(self, x):\n for i, layer in enumerate(self.layers):\n x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n return x" }, { "identifier": "ContrastiveEmbed", "path": "models/GroundingDINO/utils.py", "snippet": "class ContrastiveEmbed(nn.Module):\n def __init__(self, max_text_len=256):\n \"\"\"\n Args:\n max_text_len: max length of text.\n \"\"\"\n super().__init__()\n 
self.max_text_len = max_text_len\n\n def forward(self, x, text_dict):\n \"\"\"_summary_\n\n Args:\n x (_type_): _description_\n text_dict (_type_): _description_\n {\n 'encoded_text': encoded_text, # bs, 195, d_model\n 'text_token_mask': text_token_mask, # bs, 195\n # True for used tokens. False for padding tokens\n }\n Returns:\n _type_: _description_\n \"\"\"\n assert isinstance(text_dict, dict)\n # print(x) #torch.Size([2, 16320, 256])\n # print(text_dict)\n\n # import pdb;pdb.set_trace()\n y = text_dict[\"encoded_text\"] #torch.Size([2, 195, 256])\n text_token_mask = text_dict[\"text_token_mask\"]\n\n res = x @ y.transpose(-1, -2)\n res.masked_fill_(~text_token_mask[:, None, :], float(\"-inf\"))\n # 接着,对res进行掩码操作,将未使用的文本token(即padding的token)对应的得分置为负无穷float(\"-inf\")。这是为了在计算相似度时,排除padding部分的影响。\n\n\n # padding to max_text_len\n new_res = torch.full((*res.shape[:-1], self.max_text_len), float(\"-inf\"), device=res.device)\n new_res[..., : res.shape[-1]] = res #torch.Size([2, 16320, 195])\n\n return new_res" }, { "identifier": "sigmoid_focal_loss", "path": "models/GroundingDINO/utils.py", "snippet": "def sigmoid_focal_loss(\n inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2, no_reduction=False\n):\n \"\"\"\n Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.\n Args:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n targets: A float tensor with the same shape as inputs. Stores the binary\n classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n alpha: (optional) Weighting factor in range (0,1) to balance\n positive vs negative examples. Default = -1 (no weighting).\n gamma: Exponent of the modulating factor (1 - p_t) to\n balance easy vs hard examples.\n Returns:\n Loss tensor\n \"\"\"\n prob = inputs.sigmoid()\n ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction=\"none\")\n p_t = prob * targets + (1 - prob) * (1 - targets)\n loss = ce_loss * ((1 - p_t) ** gamma)\n\n if alpha >= 0:\n alpha_t = alpha * targets + (1 - alpha) * (1 - targets)\n loss = alpha_t * loss\n\n if no_reduction:\n return loss\n\n return loss.mean(1).sum() / num_boxes" }, { "identifier": "build_matcher", "path": "models/GroundingDINO/matcher.py", "snippet": "def build_matcher(args):\n assert args.matcher_type in ['HungarianMatcher', 'SimpleMinsumMatcher'], \"Unknown args.matcher_type: {}\".format(args.matcher_type)\n if args.matcher_type == 'HungarianMatcher':\n return HungarianMatcher(\n cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou,\n focal_alpha=args.focal_alpha\n )\n elif args.matcher_type == 'SimpleMinsumMatcher':\n return SimpleMinsumMatcher(\n cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou,\n focal_alpha=args.focal_alpha\n ) \n else:\n raise NotImplementedError(\"Unknown args.matcher_type: {}\".format(args.matcher_type))" } ]
import copy import torch import torch.nn.functional as F from typing import List from torch import nn from torchvision.ops.boxes import nms from transformers import AutoTokenizer, BertModel, BertTokenizer, RobertaModel, RobertaTokenizerFast from groundingdino.util import box_ops, get_tokenlizer from groundingdino.util.misc import ( NestedTensor, accuracy, get_world_size, interpolate, inverse_sigmoid, is_dist_avail_and_initialized, nested_tensor_from_tensor_list, ) from groundingdino.util.utils import get_phrases_from_posmap from groundingdino.util.visualizer import COCOVisualizer from groundingdino.util.vl_utils import create_positive_map_from_span from ..registry import MODULE_BUILD_FUNCS from .backbone import build_backbone from .bertwarper import ( BertModelWarper, generate_masks_with_special_tokens, generate_masks_with_special_tokens_and_transfer_map, ) from .transformer import build_transformer from .utils import MLP, ContrastiveEmbed, sigmoid_focal_loss from .matcher import build_matcher from pycocotools.coco import COCO
13,534
class PostProcess(nn.Module): """ This module converts the model's output into the format expected by the coco api""" def __init__(self, num_select=100,text_encoder_type='text_encoder_type', nms_iou_threshold=-1,use_coco_eval=False,args=None) -> None: super().__init__() self.num_select = num_select self.tokenizer = get_tokenlizer.get_tokenlizer(text_encoder_type) if args.use_coco_eval: coco = COCO(args.coco_val_path) category_dict = coco.loadCats(coco.getCatIds()) cat_list = [item['name'] for item in category_dict] else: cat_list=args.label_list caption = " . ".join(cat_list) + ' .' tokenized = self.tokenizer(caption, padding="longest", return_tensors="pt") label_list = torch.arange(len(cat_list)) pos_map=create_positive_map(tokenized,label_list,cat_list,caption) # build a mapping from label_id to pos_map if args.use_coco_eval: id_map = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 7, 7: 8, 8: 9, 9: 10, 10: 11, 11: 13, 12: 14, 13: 15, 14: 16, 15: 17, 16: 18, 17: 19, 18: 20, 19: 21, 20: 22, 21: 23, 22: 24, 23: 25, 24: 27, 25: 28, 26: 31, 27: 32, 28: 33, 29: 34, 30: 35, 31: 36, 32: 37, 33: 38, 34: 39, 35: 40, 36: 41, 37: 42, 38: 43, 39: 44, 40: 46, 41: 47, 42: 48, 43: 49, 44: 50, 45: 51, 46: 52, 47: 53, 48: 54, 49: 55, 50: 56, 51: 57, 52: 58, 53: 59, 54: 60, 55: 61, 56: 62, 57: 63, 58: 64, 59: 65, 60: 67, 61: 70, 62: 72, 63: 73, 64: 74, 65: 75, 66: 76, 67: 77, 68: 78, 69: 79, 70: 80, 71: 81, 72: 82, 73: 84, 74: 85, 75: 86, 76: 87, 77: 88, 78: 89, 79: 90} new_pos_map = torch.zeros((91, 256)) for k, v in id_map.items(): new_pos_map[v] = pos_map[k] pos_map=new_pos_map self.nms_iou_threshold=nms_iou_threshold self.positive_map = pos_map @torch.no_grad() def forward(self, outputs, target_sizes, not_to_xyxy=False, test=False): """ Perform the computation Parameters: outputs: raw outputs of the model target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch For evaluation, this must be the original image size (before any data augmentation) For visualization, this should be the image size after data augment, but before padding """ num_select = self.num_select out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes'] prob_to_token = out_logits.sigmoid() pos_maps = self.positive_map.to(prob_to_token.device) for label_ind in range(len(pos_maps)): if pos_maps[label_ind].sum() != 0: pos_maps[label_ind]=pos_maps[label_ind]/pos_maps[label_ind].sum() prob_to_label = prob_to_token @ pos_maps.T assert len(out_logits) == len(target_sizes) assert target_sizes.shape[1] == 2 prob = prob_to_label topk_values, topk_indexes = torch.topk(prob.view(prob.shape[0], -1), num_select, dim=1) scores = topk_values topk_boxes = torch.div(topk_indexes, prob.shape[2], rounding_mode='trunc') labels = topk_indexes % prob.shape[2] if not_to_xyxy: boxes = out_bbox else: boxes = box_ops.box_cxcywh_to_xyxy(out_bbox) # if test: # assert not not_to_xyxy # boxes[:,:,2:] = boxes[:,:,2:] - boxes[:,:,:2] boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1,1,4)) # and from relative [0, 1] to absolute [0, height] coordinates img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) boxes = boxes * scale_fct[:, None, :] if self.nms_iou_threshold > 0: item_indices = [nms(b, s, iou_threshold=self.nms_iou_threshold) for b,s in zip(boxes, scores)] results = [{'scores': s[i], 'labels': l[i], 'boxes': b[i]} for s, l, b, i in zip(scores, labels, boxes, item_indices)] else: results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, 
labels, boxes)] results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)] return results @MODULE_BUILD_FUNCS.registe_with_name(module_name="groundingdino") def build_groundingdino(args): device = torch.device(args.device) backbone = build_backbone(args) transformer = build_transformer(args) dn_labelbook_size = args.dn_labelbook_size dec_pred_bbox_embed_share = args.dec_pred_bbox_embed_share sub_sentence_present = args.sub_sentence_present model = GroundingDINO( backbone, transformer, num_queries=args.num_queries, aux_loss=args.aux_loss, iter_update=True, query_dim=4, num_feature_levels=args.num_feature_levels, nheads=args.nheads, dec_pred_bbox_embed_share=dec_pred_bbox_embed_share, two_stage_type=args.two_stage_type, two_stage_bbox_embed_share=args.two_stage_bbox_embed_share, two_stage_class_embed_share=args.two_stage_class_embed_share, num_patterns=args.num_patterns, dn_number=0, dn_box_noise_scale=args.dn_box_noise_scale, dn_label_noise_ratio=args.dn_label_noise_ratio, dn_labelbook_size=dn_labelbook_size, text_encoder_type=args.text_encoder_type, sub_sentence_present=sub_sentence_present, max_text_len=args.max_text_len, )
# ------------------------------------------------------------------------ # Grounding DINO # url: https://github.com/IDEA-Research/GroundingDINO # Copyright (c) 2023 IDEA. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Conditional DETR model and criterion classes. # Copyright (c) 2021 Microsoft. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Modified from DETR (https://github.com/facebookresearch/detr) # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # ------------------------------------------------------------------------ # Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR) # Copyright (c) 2020 SenseTime. All Rights Reserved. # ------------------------------------------------------------------------ class GroundingDINO(nn.Module): """This is the Cross-Attention Detector module that performs object detection""" def __init__( self, backbone, transformer, num_queries, aux_loss=False, iter_update=False, query_dim=2, num_feature_levels=1, nheads=8, # two stage two_stage_type="no", # ['no', 'standard'] dec_pred_bbox_embed_share=True, two_stage_class_embed_share=True, two_stage_bbox_embed_share=True, num_patterns=0, dn_number=100, dn_box_noise_scale=0.4, dn_label_noise_ratio=0.5, dn_labelbook_size=100, text_encoder_type="bert-base-uncased", sub_sentence_present=True, max_text_len=256, ): """Initializes the model. Parameters: backbone: torch module of the backbone to be used. See backbone.py transformer: torch module of the transformer architecture. See transformer.py num_queries: number of object queries, ie detection slot. This is the maximal number of objects Conditional DETR can detect in a single image. For COCO, we recommend 100 queries. aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. 
""" super().__init__() self.num_queries = num_queries self.transformer = transformer self.hidden_dim = hidden_dim = transformer.d_model self.num_feature_levels = num_feature_levels self.nheads = nheads self.max_text_len = 256 self.sub_sentence_present = sub_sentence_present # setting query dim self.query_dim = query_dim assert query_dim == 4 # for dn training self.num_patterns = num_patterns self.dn_number = dn_number self.dn_box_noise_scale = dn_box_noise_scale self.dn_label_noise_ratio = dn_label_noise_ratio self.dn_labelbook_size = dn_labelbook_size # bert self.tokenizer = get_tokenlizer.get_tokenlizer(text_encoder_type) self.bert = get_tokenlizer.get_pretrained_language_model(text_encoder_type) self.bert.pooler.dense.weight.requires_grad_(False) self.bert.pooler.dense.bias.requires_grad_(False) self.bert = BertModelWarper(bert_model=self.bert) self.feat_map = nn.Linear(self.bert.config.hidden_size, self.hidden_dim, bias=True) nn.init.constant_(self.feat_map.bias.data, 0) nn.init.xavier_uniform_(self.feat_map.weight.data) # freeze # special tokens self.specical_tokens = self.tokenizer.convert_tokens_to_ids(["[CLS]", "[SEP]", ".", "?"]) # prepare input projection layers if num_feature_levels > 1: num_backbone_outs = len(backbone.num_channels) input_proj_list = [] for _ in range(num_backbone_outs): in_channels = backbone.num_channels[_] input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), ) ) for _ in range(num_feature_levels - num_backbone_outs): input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1), nn.GroupNorm(32, hidden_dim), ) ) in_channels = hidden_dim self.input_proj = nn.ModuleList(input_proj_list) else: assert two_stage_type == "no", "two_stage_type should be no if num_feature_levels=1 !!!" self.input_proj = nn.ModuleList( [ nn.Sequential( nn.Conv2d(backbone.num_channels[-1], hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), ) ] ) self.backbone = backbone self.aux_loss = aux_loss self.box_pred_damping = box_pred_damping = None self.iter_update = iter_update assert iter_update, "Why not iter_update?" 
# prepare pred layers self.dec_pred_bbox_embed_share = dec_pred_bbox_embed_share # prepare class & box embed _class_embed = ContrastiveEmbed() _bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3) nn.init.constant_(_bbox_embed.layers[-1].weight.data, 0) nn.init.constant_(_bbox_embed.layers[-1].bias.data, 0) if dec_pred_bbox_embed_share: box_embed_layerlist = [_bbox_embed for i in range(transformer.num_decoder_layers)] else: box_embed_layerlist = [ copy.deepcopy(_bbox_embed) for i in range(transformer.num_decoder_layers) ] class_embed_layerlist = [_class_embed for i in range(transformer.num_decoder_layers)] self.bbox_embed = nn.ModuleList(box_embed_layerlist) self.class_embed = nn.ModuleList(class_embed_layerlist) self.transformer.decoder.bbox_embed = self.bbox_embed self.transformer.decoder.class_embed = self.class_embed # two stage self.two_stage_type = two_stage_type assert two_stage_type in ["no", "standard"], "unknown param {} of two_stage_type".format( two_stage_type ) if two_stage_type != "no": if two_stage_bbox_embed_share: assert dec_pred_bbox_embed_share self.transformer.enc_out_bbox_embed = _bbox_embed else: self.transformer.enc_out_bbox_embed = copy.deepcopy(_bbox_embed) if two_stage_class_embed_share: assert dec_pred_bbox_embed_share self.transformer.enc_out_class_embed = _class_embed else: self.transformer.enc_out_class_embed = copy.deepcopy(_class_embed) self.refpoint_embed = None self._reset_parameters() def _reset_parameters(self): # init input_proj for proj in self.input_proj: nn.init.xavier_uniform_(proj[0].weight, gain=1) nn.init.constant_(proj[0].bias, 0) def init_ref_points(self, use_num_queries): self.refpoint_embed = nn.Embedding(use_num_queries, self.query_dim) def forward(self, samples: NestedTensor, targets: List = None, **kw): """The forward expects a NestedTensor, which consists of: - samples.tensor: batched images, of shape [batch_size x 3 x H x W] - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels It returns a dict with the following elements: - "pred_logits": the classification logits (including no-object) for all queries. Shape= [batch_size x num_queries x num_classes] - "pred_boxes": The normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image (disregarding possible padding). See PostProcess for information on how to retrieve the unnormalized bounding box. - "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of dictionnaries containing the two above keys for each decoder layer. 
""" if targets is None: captions = kw["captions"] else: captions = [t["caption"] for t in targets] # encoder texts tokenized = self.tokenizer(captions, padding="longest", return_tensors="pt").to( samples.device ) one_hot_token = tokenized ( text_self_attention_masks, position_ids, cate_to_token_mask_list, ) = generate_masks_with_special_tokens_and_transfer_map( tokenized, self.specical_tokens, self.tokenizer ) if text_self_attention_masks.shape[1] > self.max_text_len: text_self_attention_masks = text_self_attention_masks[ :, : self.max_text_len, : self.max_text_len ] position_ids = position_ids[:, : self.max_text_len] tokenized["input_ids"] = tokenized["input_ids"][:, : self.max_text_len] tokenized["attention_mask"] = tokenized["attention_mask"][:, : self.max_text_len] tokenized["token_type_ids"] = tokenized["token_type_ids"][:, : self.max_text_len] # extract text embeddings if self.sub_sentence_present: tokenized_for_encoder = {k: v for k, v in tokenized.items() if k != "attention_mask"} tokenized_for_encoder["attention_mask"] = text_self_attention_masks tokenized_for_encoder["position_ids"] = position_ids else: tokenized_for_encoder = tokenized bert_output = self.bert(**tokenized_for_encoder) # bs, 195, 768 encoded_text = self.feat_map(bert_output["last_hidden_state"]) # bs, 195, d_model text_token_mask = tokenized.attention_mask.bool() # bs, 195 # text_token_mask: True for nomask, False for mask # text_self_attention_masks: True for nomask, False for mask if encoded_text.shape[1] > self.max_text_len: encoded_text = encoded_text[:, : self.max_text_len, :] text_token_mask = text_token_mask[:, : self.max_text_len] position_ids = position_ids[:, : self.max_text_len] text_self_attention_masks = text_self_attention_masks[ :, : self.max_text_len, : self.max_text_len ] text_dict = { "encoded_text": encoded_text, # bs, 195, d_model "text_token_mask": text_token_mask, # bs, 195 "position_ids": position_ids, # bs, 195 "text_self_attention_masks": text_self_attention_masks, # bs, 195,195 } if isinstance(samples, (list, torch.Tensor)): samples = nested_tensor_from_tensor_list(samples) features, poss = self.backbone(samples) srcs = [] masks = [] for l, feat in enumerate(features): src, mask = feat.decompose() srcs.append(self.input_proj[l](src)) masks.append(mask) assert mask is not None if self.num_feature_levels > len(srcs): _len_srcs = len(srcs) for l in range(_len_srcs, self.num_feature_levels): if l == _len_srcs: src = self.input_proj[l](features[-1].tensors) else: src = self.input_proj[l](srcs[-1]) m = samples.mask mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to(torch.bool)[0] pos_l = self.backbone[1](NestedTensor(src, mask)).to(src.dtype) srcs.append(src) masks.append(mask) poss.append(pos_l) input_query_bbox = input_query_label = attn_mask = dn_meta = None hs, reference, hs_enc, ref_enc, init_box_proposal = self.transformer( srcs, masks, input_query_bbox, poss, input_query_label, attn_mask, text_dict ) # deformable-detr-like anchor update outputs_coord_list = [] for dec_lid, (layer_ref_sig, layer_bbox_embed, layer_hs) in enumerate( zip(reference[:-1], self.bbox_embed, hs) ): layer_delta_unsig = layer_bbox_embed(layer_hs) layer_outputs_unsig = layer_delta_unsig + inverse_sigmoid(layer_ref_sig) layer_outputs_unsig = layer_outputs_unsig.sigmoid() outputs_coord_list.append(layer_outputs_unsig) outputs_coord_list = torch.stack(outputs_coord_list) outputs_class = torch.stack( [ layer_cls_embed(layer_hs, text_dict) for layer_cls_embed, layer_hs in zip(self.class_embed, hs) ] ) out = 
{"pred_logits": outputs_class[-1], "pred_boxes": outputs_coord_list[-1]} # Used to calculate losses bs, len_td = text_dict['text_token_mask'].shape out['text_mask']=torch.zeros(bs, self.max_text_len, dtype=torch.bool).to( samples.device ) for b in range(bs): for j in range(len_td): if text_dict['text_token_mask'][b][j] == True: out['text_mask'][b][j] = True # for intermediate outputs if self.aux_loss: out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord_list) out['token']=one_hot_token # # for encoder output if hs_enc is not None: # prepare intermediate outputs interm_coord = ref_enc[-1] interm_class = self.transformer.enc_out_class_embed(hs_enc[-1], text_dict) out['interm_outputs'] = {'pred_logits': interm_class, 'pred_boxes': interm_coord} out['interm_outputs_for_matching_pre'] = {'pred_logits': interm_class, 'pred_boxes': init_box_proposal} # outputs['pred_logits'].shape # torch.Size([4, 900, 256]) # outputs['pred_boxes'].shape # torch.Size([4, 900, 4]) # outputs['text_mask'].shape # torch.Size([256]) # outputs['text_mask'] # outputs['aux_outputs'][0].keys() # dict_keys(['pred_logits', 'pred_boxes', 'one_hot', 'text_mask']) # outputs['aux_outputs'][img_idx] # outputs['token'] # <class 'transformers.tokenization_utils_base.BatchEncoding'> # outputs['interm_outputs'].keys() # dict_keys(['pred_logits', 'pred_boxes', 'one_hot', 'text_mask']) # outputs['interm_outputs_for_matching_pre'].keys() # dict_keys(['pred_logits', 'pred_boxes']) # outputs['one_hot'].shape # torch.Size([4, 900, 256]) return out @torch.jit.unused def _set_aux_loss(self, outputs_class, outputs_coord): # this is a workaround to make torchscript happy, as torchscript # doesn't support dictionary with non-homogeneous values, such # as a dict having both a Tensor and a list. return [ {"pred_logits": a, "pred_boxes": b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1]) ] class SetCriterion(nn.Module): def __init__(self, matcher, weight_dict, focal_alpha,focal_gamma, losses): """ Create the criterion. Parameters: matcher: module able to compute a matching between targets and proposals weight_dict: dict containing as key the names of the losses and as values their relative weight. losses: list of all the losses to be applied. See get_loss for list of available losses. focal_alpha: alpha in Focal Loss """ super().__init__() self.matcher = matcher self.weight_dict = weight_dict self.losses = losses self.focal_alpha = focal_alpha self.focal_gamma= focal_gamma @torch.no_grad() def loss_cardinality(self, outputs, targets, indices, num_boxes): """ Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients """ pred_logits = outputs['pred_logits'] device = pred_logits.device tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device) # Count the number of predictions that are NOT "no-object" (which is the last class) card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1) card_err = F.l1_loss(card_pred.float(), tgt_lengths.float()) losses = {'cardinality_error': card_err} return losses def loss_boxes(self, outputs, targets, indices, num_boxes): """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4] The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. 
""" assert 'pred_boxes' in outputs idx = self._get_src_permutation_idx(indices) src_boxes = outputs['pred_boxes'][idx] target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0) loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none') losses = {} losses['loss_bbox'] = loss_bbox.sum() / num_boxes loss_giou = 1 - torch.diag(box_ops.generalized_box_iou( box_ops.box_cxcywh_to_xyxy(src_boxes), box_ops.box_cxcywh_to_xyxy(target_boxes))) losses['loss_giou'] = loss_giou.sum() / num_boxes # calculate the x,y and h,w loss with torch.no_grad(): losses['loss_xy'] = loss_bbox[..., :2].sum() / num_boxes losses['loss_hw'] = loss_bbox[..., 2:].sum() / num_boxes return losses def token_sigmoid_binary_focal_loss(self, outputs, targets, indices, num_boxes): pred_logits=outputs['pred_logits'] new_targets=outputs['one_hot'].to(pred_logits.device) text_mask=outputs['text_mask'] assert (new_targets.dim() == 3) assert (pred_logits.dim() == 3) # batch x from x to bs, n, _ = pred_logits.shape alpha=self.focal_alpha gamma=self.focal_gamma if text_mask is not None: # ODVG: each sample has different mask text_mask = text_mask.repeat(1, pred_logits.size(1)).view(outputs['text_mask'].shape[0],-1,outputs['text_mask'].shape[1]) pred_logits = torch.masked_select(pred_logits, text_mask) new_targets = torch.masked_select(new_targets, text_mask) new_targets=new_targets.float() p = torch.sigmoid(pred_logits) ce_loss = F.binary_cross_entropy_with_logits(pred_logits, new_targets, reduction="none") p_t = p * new_targets + (1 - p) * (1 - new_targets) loss = ce_loss * ((1 - p_t) ** gamma) if alpha >= 0: alpha_t = alpha * new_targets + (1 - alpha) * (1 - new_targets) loss = alpha_t * loss total_num_pos=0 for batch_indices in indices: total_num_pos += len(batch_indices[0]) num_pos_avg_per_gpu = max(total_num_pos , 1.0) loss=loss.sum()/num_pos_avg_per_gpu losses = {'loss_ce': loss} return losses def _get_src_permutation_idx(self, indices): # permute predictions following indices batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)]) src_idx = torch.cat([src for (src, _) in indices]) return batch_idx, src_idx def _get_tgt_permutation_idx(self, indices): # permute targets following indices batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)]) tgt_idx = torch.cat([tgt for (_, tgt) in indices]) return batch_idx, tgt_idx def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs): loss_map = { 'labels': self.token_sigmoid_binary_focal_loss, 'cardinality': self.loss_cardinality, 'boxes': self.loss_boxes, } assert loss in loss_map, f'do you really want to compute {loss} loss?' return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs) def forward(self, outputs, targets, cat_list, caption, return_indices=False): """ This performs the loss computation. Parameters: outputs: dict of tensors, see the output specification of the model for the format targets: list of dicts, such that len(targets) == batch_size. The expected keys in each dict depends on the losses applied, see each loss' doc return_indices: used for vis. if True, the layer0-5 indices will be returned as well. 
""" device=next(iter(outputs.values())).device one_hot = torch.zeros(outputs['pred_logits'].size(),dtype=torch.int64) # torch.Size([bs, 900, 256]) token = outputs['token'] label_map_list = [] indices = [] for j in range(len(cat_list)): # bs label_map=[] for i in range(len(cat_list[j])): label_id=torch.tensor([i]) per_label=create_positive_map(token[j], label_id, cat_list[j], caption[j]) label_map.append(per_label) label_map=torch.stack(label_map,dim=0).squeeze(1) label_map_list.append(label_map) for j in range(len(cat_list)): # bs for_match = { "pred_logits" : outputs['pred_logits'][j].unsqueeze(0), "pred_boxes" : outputs['pred_boxes'][j].unsqueeze(0) } inds = self.matcher(for_match, [targets[j]], label_map_list[j]) indices.extend(inds) # indices : A list of size batch_size, containing tuples of (index_i, index_j) where: # - index_i is the indices of the selected predictions (in order) # - index_j is the indices of the corresponding selected targets (in order) # import pdb; pdb.set_trace() tgt_ids = [v["labels"].cpu() for v in targets] # len(tgt_ids) == bs for i in range(len(indices)): tgt_ids[i]=tgt_ids[i][indices[i][1]] one_hot[i,indices[i][0]] = label_map_list[i][tgt_ids[i]].to(torch.long) outputs['one_hot'] = one_hot if return_indices: indices0_copy = indices indices_list = [] # Compute the average number of target boxes accross all nodes, for normalization purposes num_boxes_list = [len(t["labels"]) for t in targets] num_boxes = sum(num_boxes_list) num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=device) if is_dist_avail_and_initialized(): torch.distributed.all_reduce(num_boxes) num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item() # Compute all the requested losses losses = {} for loss in self.losses: losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes)) # In case of auxiliary losses, we repeat this process with the output of each intermediate layer. 
if 'aux_outputs' in outputs: for idx, aux_outputs in enumerate(outputs['aux_outputs']): indices = [] for j in range(len(cat_list)): # bs aux_output_single = { 'pred_logits' : aux_outputs['pred_logits'][j].unsqueeze(0), 'pred_boxes': aux_outputs['pred_boxes'][j].unsqueeze(0) } inds = self.matcher(aux_output_single, [targets[j]], label_map_list[j]) indices.extend(inds) one_hot_aux = torch.zeros(outputs['pred_logits'].size(),dtype=torch.int64) tgt_ids = [v["labels"].cpu() for v in targets] for i in range(len(indices)): tgt_ids[i]=tgt_ids[i][indices[i][1]] one_hot_aux[i,indices[i][0]] = label_map_list[i][tgt_ids[i]].to(torch.long) aux_outputs['one_hot'] = one_hot_aux aux_outputs['text_mask'] = outputs['text_mask'] if return_indices: indices_list.append(indices) for loss in self.losses: kwargs = {} l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs) l_dict = {k + f'_{idx}': v for k, v in l_dict.items()} losses.update(l_dict) # interm_outputs loss if 'interm_outputs' in outputs: interm_outputs = outputs['interm_outputs'] indices = [] for j in range(len(cat_list)): # bs interm_output_single = { 'pred_logits' : interm_outputs['pred_logits'][j].unsqueeze(0), 'pred_boxes': interm_outputs['pred_boxes'][j].unsqueeze(0) } inds = self.matcher(interm_output_single, [targets[j]], label_map_list[j]) indices.extend(inds) one_hot_aux = torch.zeros(outputs['pred_logits'].size(),dtype=torch.int64) tgt_ids = [v["labels"].cpu() for v in targets] for i in range(len(indices)): tgt_ids[i]=tgt_ids[i][indices[i][1]] one_hot_aux[i,indices[i][0]] = label_map_list[i][tgt_ids[i]].to(torch.long) interm_outputs['one_hot'] = one_hot_aux interm_outputs['text_mask'] = outputs['text_mask'] if return_indices: indices_list.append(indices) for loss in self.losses: kwargs = {} l_dict = self.get_loss(loss, interm_outputs, targets, indices, num_boxes, **kwargs) l_dict = {k + f'_interm': v for k, v in l_dict.items()} losses.update(l_dict) if return_indices: indices_list.append(indices0_copy) return losses, indices_list return losses class PostProcess(nn.Module): """ This module converts the model's output into the format expected by the coco api""" def __init__(self, num_select=100,text_encoder_type='text_encoder_type', nms_iou_threshold=-1,use_coco_eval=False,args=None) -> None: super().__init__() self.num_select = num_select self.tokenizer = get_tokenlizer.get_tokenlizer(text_encoder_type) if args.use_coco_eval: coco = COCO(args.coco_val_path) category_dict = coco.loadCats(coco.getCatIds()) cat_list = [item['name'] for item in category_dict] else: cat_list=args.label_list caption = " . ".join(cat_list) + ' .' 
tokenized = self.tokenizer(caption, padding="longest", return_tensors="pt") label_list = torch.arange(len(cat_list)) pos_map=create_positive_map(tokenized,label_list,cat_list,caption) # build a mapping from label_id to pos_map if args.use_coco_eval: id_map = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 7, 7: 8, 8: 9, 9: 10, 10: 11, 11: 13, 12: 14, 13: 15, 14: 16, 15: 17, 16: 18, 17: 19, 18: 20, 19: 21, 20: 22, 21: 23, 22: 24, 23: 25, 24: 27, 25: 28, 26: 31, 27: 32, 28: 33, 29: 34, 30: 35, 31: 36, 32: 37, 33: 38, 34: 39, 35: 40, 36: 41, 37: 42, 38: 43, 39: 44, 40: 46, 41: 47, 42: 48, 43: 49, 44: 50, 45: 51, 46: 52, 47: 53, 48: 54, 49: 55, 50: 56, 51: 57, 52: 58, 53: 59, 54: 60, 55: 61, 56: 62, 57: 63, 58: 64, 59: 65, 60: 67, 61: 70, 62: 72, 63: 73, 64: 74, 65: 75, 66: 76, 67: 77, 68: 78, 69: 79, 70: 80, 71: 81, 72: 82, 73: 84, 74: 85, 75: 86, 76: 87, 77: 88, 78: 89, 79: 90} new_pos_map = torch.zeros((91, 256)) for k, v in id_map.items(): new_pos_map[v] = pos_map[k] pos_map=new_pos_map self.nms_iou_threshold=nms_iou_threshold self.positive_map = pos_map @torch.no_grad() def forward(self, outputs, target_sizes, not_to_xyxy=False, test=False): """ Perform the computation Parameters: outputs: raw outputs of the model target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch For evaluation, this must be the original image size (before any data augmentation) For visualization, this should be the image size after data augment, but before padding """ num_select = self.num_select out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes'] prob_to_token = out_logits.sigmoid() pos_maps = self.positive_map.to(prob_to_token.device) for label_ind in range(len(pos_maps)): if pos_maps[label_ind].sum() != 0: pos_maps[label_ind]=pos_maps[label_ind]/pos_maps[label_ind].sum() prob_to_label = prob_to_token @ pos_maps.T assert len(out_logits) == len(target_sizes) assert target_sizes.shape[1] == 2 prob = prob_to_label topk_values, topk_indexes = torch.topk(prob.view(prob.shape[0], -1), num_select, dim=1) scores = topk_values topk_boxes = torch.div(topk_indexes, prob.shape[2], rounding_mode='trunc') labels = topk_indexes % prob.shape[2] if not_to_xyxy: boxes = out_bbox else: boxes = box_ops.box_cxcywh_to_xyxy(out_bbox) # if test: # assert not not_to_xyxy # boxes[:,:,2:] = boxes[:,:,2:] - boxes[:,:,:2] boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1,1,4)) # and from relative [0, 1] to absolute [0, height] coordinates img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) boxes = boxes * scale_fct[:, None, :] if self.nms_iou_threshold > 0: item_indices = [nms(b, s, iou_threshold=self.nms_iou_threshold) for b,s in zip(boxes, scores)] results = [{'scores': s[i], 'labels': l[i], 'boxes': b[i]} for s, l, b, i in zip(scores, labels, boxes, item_indices)] else: results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)] results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)] return results @MODULE_BUILD_FUNCS.registe_with_name(module_name="groundingdino") def build_groundingdino(args): device = torch.device(args.device) backbone = build_backbone(args) transformer = build_transformer(args) dn_labelbook_size = args.dn_labelbook_size dec_pred_bbox_embed_share = args.dec_pred_bbox_embed_share sub_sentence_present = args.sub_sentence_present model = GroundingDINO( backbone, transformer, num_queries=args.num_queries, aux_loss=args.aux_loss, 
iter_update=True, query_dim=4, num_feature_levels=args.num_feature_levels, nheads=args.nheads, dec_pred_bbox_embed_share=dec_pred_bbox_embed_share, two_stage_type=args.two_stage_type, two_stage_bbox_embed_share=args.two_stage_bbox_embed_share, two_stage_class_embed_share=args.two_stage_class_embed_share, num_patterns=args.num_patterns, dn_number=0, dn_box_noise_scale=args.dn_box_noise_scale, dn_label_noise_ratio=args.dn_label_noise_ratio, dn_labelbook_size=dn_labelbook_size, text_encoder_type=args.text_encoder_type, sub_sentence_present=sub_sentence_present, max_text_len=args.max_text_len, )
matcher = build_matcher(args)
21
2023-10-14 02:20:31+00:00
16k
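The GroundingDINO record above centers on token_sigmoid_binary_focal_loss, which weights a per-token binary cross-entropy by a focal term before normalizing by the number of matched positives. As a minimal, self-contained sketch of that weighting scheme only (the text masking, shape handling, and per-GPU positive normalization from the excerpt are omitted; the function name and toy shapes here are illustrative, not the repository's API):

import torch
import torch.nn.functional as F

def sigmoid_focal_loss(logits: torch.Tensor,
                       targets: torch.Tensor,
                       alpha: float = 0.25,
                       gamma: float = 2.0) -> torch.Tensor:
    """Focal loss over per-token sigmoid logits, summed over all elements."""
    p = torch.sigmoid(logits)
    # plain BCE per element, kept unreduced so it can be re-weighted
    ce = F.binary_cross_entropy_with_logits(logits, targets, reduction="none")
    # probability assigned to the true class for each element
    p_t = p * targets + (1 - p) * (1 - targets)
    # down-weight easy, well-classified elements
    loss = ce * ((1 - p_t) ** gamma)
    if alpha >= 0:
        alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
        loss = alpha_t * loss
    return loss.sum()

# toy usage: 2 queries x 4 text tokens
logits = torch.randn(2, 4)
targets = torch.tensor([[1., 0., 0., 0.], [0., 0., 1., 0.]])
print(sigmoid_focal_loss(logits, targets))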
LehengTHU/Agent4Rec
main.py
[ { "identifier": "parse_args", "path": "parse.py", "snippet": "def parse_args():\n parser = argparse.ArgumentParser()\n\n # Overall settings\n parser.add_argument('--simulation_name', type=str, default= 'Test',\n help='The name of one trial of simulation.')\n parser.add_argument('--cuda', type=int, default=0,\n help='Specify which gpu to use.')\n parser.add_argument('--seed', type=int, default=101,\n help='Random seed.')\n parser.add_argument('--items_per_page', type=int, default=4,\n help='Number of items per page.')\n parser.add_argument('--num_avatars', type=int, default=20,\n help='Number of avatars for sandbox simulation.')\n parser.add_argument('--execution_mode', type=str, default= 'parallel',\n choices=['serial', 'parallel'],\n help='Specify execution mode: serial or parallel.')\n\n # Only recommend ground truth\n parser.add_argument(\"--rec_gt\", action=\"store_true\",\n help=\"whether to recommend ground truth\")\n \n # Using wandb\n parser.add_argument(\"--use_wandb\", action=\"store_true\",\n help=\"whether to use wandb\")\n \n # Only validate the effectiveness of agents\n parser.add_argument(\"--val_users\", action=\"store_true\",\n help=\"whether to validate users\")\n parser.add_argument('--val_ratio', type=int, default=1,\n help='Ratio of unobserved items vs ground truth for validation.')\n \n # Advertisement settings\n parser.add_argument(\"--add_advert\", action=\"store_true\",\n help=\"whether to add advertisement\")\n parser.add_argument(\"--display_advert\", action=\"store_true\",\n help=\"whether to display advertisement\")\n parser.add_argument('--advert_type', type=str, default='pop_high',\n choices=['all', 'pop_high', 'pop_low', 'unpop_high', 'unpop_low'],\n help='Specify advertisement type.')\n \n # Dataset settings\n parser.add_argument('--dataset', type=str, default='ml-1m',\n help='Dataset to use.')\n\n # Avatar settings\n parser.add_argument('--n_avatars', type=int, default=3,\n help='How many avatars to simulate.')\n parser.add_argument('--max_pages', type=int, default=1,\n help='The maximum page number users would like to view')\n\n\n # Recommender settings\n parser.add_argument('--model_path', type=str, default= 'Saved',\n help='Specify model save path.')\n parser.add_argument('--modeltype', type=str, default= 'LightGCN',\n help='Specify model save path.')\n\n # others\n parser.add_argument('--lr', type=float, default=5e-4,\n help='Learning rate.')\n parser.add_argument(\"--pred_norm\", action=\"store_true\",\n help=\"pred_norm\")\n\n args, _ = parser.parse_known_args()\n\n return args" }, { "identifier": "fix_seeds", "path": "simulation/utils.py", "snippet": "def fix_seeds(seed=101):\n\trandom.seed(seed)\n\tos.environ['PYTHONHASHSEED'] = str(seed) # In order to disable hash randomization and make the experiment reproducible.\n\tnp.random.seed(seed)\n\ttorch.manual_seed(seed)\n\ttorch.cuda.manual_seed(seed)\n\ttorch.cuda.manual_seed_all(seed) # if you are using multi-GPU.\n\ttorch.backends.cudnn.benchmark = False\n\ttorch.backends.cudnn.deterministic = True" }, { "identifier": "Avatar", "path": "simulation/avatar.py", "snippet": "class Avatar(abstract_avatar):\n def __init__(self, args, avatar_id, init_property, init_statistic):\n super().__init__(args, avatar_id)\n\n self.parse_init_property(init_property)\n self.parse_init_statistic(init_statistic)\n self.log_file = f\"storage/{args.dataset}/{args.modeltype}/{args.simulation_name}/running_logs/{avatar_id}.txt\"\n if os.path.exists(self.log_file):\n os.remove(self.log_file)\n self.init_memory()\n\n def 
parse_init_property(self, init_property):\n self.taste = init_property[\"taste\"].split(\"| \")\n self.high_rating = init_property[\"high_rating\"]\n\n\n def parse_init_statistic(self, init_statistic):\n \"\"\"\n Parse the init statistic of the avatar\n \"\"\"\n# diversity_dict\n activity_dict = { 1:\"An Incredibly Elusive Occasional Viewer, so seldom attracted by movie recommendations that it's almost a legendary event when you do watch a movie. Your movie-watching habits are extraordinarily infrequent. And you will exit the recommender system immediately even if you just feel little unsatisfied.\",\n 2:\"An Occasional Viewer, seldom attracted by movie recommendations. Only curious about watching movies that strictly align the taste. The movie-watching habits are not very infrequent. And you tend to exit the recommender system if you have a few unsatisfied memories.\",\n 3:\"A Movie Enthusiast with an insatiable appetite for films, willing to watch nearly every movie recommended to you. Movies are a central part of your life, and movie recommendations are integral to your existence. You are tolerant of recommender system, which means you are not easy to exit recommender system even if you have some unsatisfied memory.\"}\n# conformity_dict\n conformity_dict = { 1:\"A Dedicated Follower who gives ratings heavily relies on movie historical ratings, rarely expressing independent opinions. Usually give ratings that are same as historical ratings. \",\n 2:\"A Balanced Evaluator who considers both historical ratings and personal preferences when giving ratings to movies. Sometimes give ratings that are different from historical rating.\",\n 3:\"A Maverick Critic who completely ignores historical ratings and evaluates movies solely based on own taste. Usually give ratings that are a lot different from historical ratings.\"}\n# activity_dict\n diversity_dict = { 1:\"An Exceedingly Discerning Selective Viewer who watches movies with a level of selectivity that borders on exclusivity. The movie choices are meticulously curated to match personal taste, leaving no room for even a hint of variety.\",\n 2:\"A Niche Explorer who occasionally explores different genres and mostly sticks to preferred movie types.\", \n 3:\"A Cinematic Trailblazer, a relentless seeker of the unique and the obscure in the world of movies. 
The movie choices are so diverse and avant-garde that they defy categorization.\"}\n \n self.conformity_group = init_statistic[\"conformity\"]\n self.activity_group = init_statistic[\"activity\"]\n self.diversity_group = init_statistic[\"diversity\"]\n self.conformity_dsc = conformity_dict[self.conformity_group]\n self.activity_dsc = activity_dict[self.activity_group]\n self.diversity_dsc = diversity_dict[self.diversity_group]\n\n def init_memory(self):\n \"\"\"\n Initialize the memory of the avatar\n \"\"\"\n t1 = time.time()\n def score_normalizer(val: float) -> float:\n return 1 - 1 / (1 + np.exp(val))\n \n embeddings_model = OpenAIEmbeddings(request_timeout = 20)\n embedding_size = 1536\n index = faiss.IndexFlatL2(embedding_size)\n vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {}, relevance_score_fn=score_normalizer)\n\n LLM = ChatOpenAI(max_tokens=1000, temperature=0.3, request_timeout = 30)\n avatar_retriever = AvatarRetriver(vectorstore=vectorstore, k=5)\n self.memory = AvatarMemory(memory_retriever=avatar_retriever, llm=LLM, reflection_threshold=3, use_wandb = self.use_wandb)\n t2 = time.time()\n\n \n cprint(f\"Avatar {self.avatar_id} is initialized with memory\", color='green', attrs=['bold'])\n cprint(f\"Time cost: {t2-t1}s\", color='green', attrs=['bold'])\n\n\n\n def _reaction(self, messages=None, timeout=30):\n \"\"\"\n Summarize the feelings of the avatar for recommended item list.\n \"\"\" \n response = ''\n except_waiting_time = 1\n max_waiting_time = 16\n current_sleep_time = 0.5\n while response == '':\n try:\n start_time = time.time()\n time_local = time.localtime(start_time)\n l_start = time.strftime(\"%Y-%m-%d %H:%M:%S\",time_local)\n\n if(self.use_wandb): # whether to use wandb\n if((start_time - vars.global_start_time)//vars.global_interval > vars.global_steps):\n print(\"\\nStart Identifier\", start_time, vars.global_start_time, (start_time - vars.global_start_time), vars.global_steps)\n if(vars.lock.acquire(False)):\n print(\"\\nStart Identifier\", start_time, vars.global_start_time, (start_time - vars.global_start_time), vars.global_steps)\n vars.global_steps += 1\n wandb.log(\n data = {\"Real-time Traffic\": vars.global_k_tokens - vars.global_last_tokens_record,\n \"Total Traffic\": vars.global_k_tokens,\n \"Finished Users\": vars.global_finished_users,\n \"Finished Pages\": vars.global_finished_pages,\n \"Error Cast\": vars.global_error_cast/1000,\n },\n step = vars.global_steps\n )\n vars.global_last_tokens_record = vars.global_k_tokens\n vars.lock.release()\n print(\"\\nEnd Identifier\", time.time(), vars.global_start_time, (time.time() - vars.global_start_time), vars.global_steps)\n \n completion = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\", \n messages=messages,\n temperature=0.2,\n request_timeout = timeout,\n max_tokens=1000\n )\n\n l_end = time.strftime(\"%Y-%m-%d %H:%M:%S\",time.localtime(time.time()))\n k_tokens = completion[\"usage\"][\"total_tokens\"]/1000\n print(f\"User {self.avatar_id} used {k_tokens} tokens from {l_start} to {l_end}\")\n self.memory.user_k_tokens += k_tokens\n vars.global_k_tokens += k_tokens\n response = completion[\"choices\"][0][\"message\"][\"content\"]\n except Exception as e:\n # print(e)\n vars.global_error_cast += 1\n time.sleep(current_sleep_time)\n if except_waiting_time < max_waiting_time:\n except_waiting_time *= 2\n current_sleep_time = np.random.randint(0, except_waiting_time-1)\n \n return response\n \n \n def make_next_decision(self, remember=False, 
current_page=None):\n observation = \"Do you satisfy with current recommendation system and what's your interaction history?\"\n relevant_memories = self.memory.fetch_memories(observation)\n formated_relevant_memories = self.memory.format_memories_detail(relevant_memories)\n sys_prompt = (\"You excel at role-playing. Picture yourself as a user exploring a movie recommendation system. You have the following social traits: \" \\\n +f\"\\nYour activity trait is described as: {self.activity_dsc}\"\n +f\"\\nNow you are in Page {current_page}. You may get tired with the increase of the pages you have browsed. (above 2 pages is a little bit tired, above 4 pages is very tired)\"\n +f\"\\nRelevant context from your memory:\"\n +f\"\\n{formated_relevant_memories}\"\n )\n prompt = (\"Firstly, generate an overall feeling based on your memory, in accordance with your activity trait and your satisfaction on recommender system.\"\n +\"\\nIf your overall feeling is positive, write: POSITIVE: [reason]\"\n +\"\\nIf it's negative, write: NEGATIVE: [reason]\"\n +\"\\nNext, assess your level of fatigue. You may become tired more easily if you have an inactive activity trait.\"\n +\"\\nNow, decide whether to continue browsing or exit the recommendation system based on your overall feeling, activity trait, and tiredness.\"\n +\"\\nYou will exit the recommender system either you have negative feelings or you are tired, especially if you have a low activity trait.\"\n +\"\\nTo leave, write: [EXIT]; Reason: [brief reason]\"\n +\"\\nTo continue browsing, write: [NEXT]; Reason: [brief reason]\"\n )\n messages = [{\"role\": \"system\",\n \"content\": sys_prompt},\n {\"role\": \"user\",\n \"content\": prompt}]\n \n self.write_log(\"\\n\" + sys_prompt, color=\"blue\")\n self.write_log(\"\\n\" + prompt, color=\"blue\")\n response = self._reaction(messages)\n self.write_log(\"\\n\" + response, color=\"white\")\n\n return response\n \n def response_to_question(self, question, remember=False):\n relevant_memories = self.memory.memory_retriever.memory_stream\n formated_relevant_memories = self.memory.format_memories_detail(relevant_memories)\n sys_prompt = (f\"You excel at role-playing. Picture yourself as user {self.avatar_id} who has just finished exploring a movie recommendation system. You have the following social traits:\"\n +f\"\\nYour activity trait is described as: {self.activity_dsc}\"\n +f\"\\nYour conformity trait is described as: {self.conformity_dsc}\"\n +f\"\\nYour diversity trait is described as: {self.diversity_dsc}\"\n +f\"\\nBeyond that, your movie tastes are: {'; '.join(self.taste).replace('I ','')}. \"\n +\"\\nThe activity characteristic pertains to the frequency of your movie-watching habits. The conformity characteristic measures the degree to which your ratings are influenced by historical ratings. 
The diversity characteristic gauges your likelihood of watching movies that may not align with your usual taste.\"\n )\n prompt = f\"\"\"\n Relevant context from user {self.avatar_id}'s memory:\n {formated_relevant_memories}\n Act as user {self.avatar_id}, assume you are having a interview, reponse the following question:\n {question}\n \"\"\"\n\n\n messages = [{\"role\": \"system\",\n \"content\": sys_prompt},\n {\"role\": \"user\",\n \"content\": prompt}]\n \n self.write_log(\"\\n\" + sys_prompt, color=\"blue\")\n self.write_log(\"\\n\" + prompt, color=\"blue\")\n response = self._reaction(messages)\n self.write_log(\"\\n\" + response, color=\"blue\")\n # \n if(remember):\n self.memory.add_memory(f\"I was asked '{question}', and I responsed: '{response}'\"\n , now=datetime.datetime.now())\n return response\n \n def reaction_to_forced_items(self, recommended_items_str):\n \"\"\"\n Summarize the feelings of the avatar for recommended item list.\n \"\"\"\n\n sys_prompt = (\"Assume you are a user browsing movie recommendation system who has the following characteristics: \"\n +f\"\\nYour movie tastes are: {'; '.join(self.taste).replace('I ','')}. \")\n prompt = (\n \"##recommended list## \\n\" \n +recommended_items_str\n +\"\\nPlease choose movies in the ##recommended list## that you want to watch and explain why. After watching the movie, evaluate each movie based on your characteristics, taste and historical ratings to give a rating from 1 to 5.\"\n +\"\\nYou only watch movies which aligh with your taste.\"\n +\"\\nUse this format: MOVIE: [movie name]; WATCH: [yes or no]; REASON: [brief reason]\"\n \"\\nYou must judge all the movies. If you don't want to watch a movie, use WATCH: no; REASON: [brief reason]\"\n +\"\\nEach response should be on one line. Do not include any additional information or explanations and stay grounded in reality.\"\n )\n messages = [{\"role\": \"system\",\n \"content\": sys_prompt},\n {\"role\": \"user\",\n \"content\": prompt}]\n\n reaction = self._reaction(messages, timeout=60)\n\n return reaction\n \n def reaction_to_recommended_items(self, recommended_items_str, current_page):\n \"\"\"\n Summarize the feelings of the avatar for recommended item list.\n \"\"\" \n try:\n high_rating = self.high_rating.replace('You are','')\n except:\n high_rating = ''\n\n sys_prompt = (\"You excel at role-playing. Picture yourself as a user exploring a movie recommendation system. You have the following social traits:\"\n +f\"\\nYour activity trait is described as: {self.activity_dsc}\"\n +f\"\\nYour conformity trait is described as: {self.conformity_dsc}\"\n +f\"\\nYour diversity trait is described as: {self.diversity_dsc}\"\n +f\"\\nBeyond that, your movie tastes are: {'; '.join(self.taste).replace('I ','')}. \"\n +f\"\\nAnd your rating tendency is {high_rating}\"#+f\"{low_rating}\"\n +\"\\nThe activity characteristic pertains to the frequency of your movie-watching habits. The conformity characteristic measures the degree to which your ratings are influenced by historical ratings. 
The diversity characteristic gauges your likelihood of watching movies that may not align with your usual taste.\"\n )\n if self.memory.memory_retriever.memory_stream:\n observation = \"What movies have you watched on the previous pages of the current recommender system?\"\n relevant_memories = self.memory.fetch_memories(observation)\n formated_relevant_memories = self.memory.format_memories_detail(relevant_memories)\n sys_prompt = sys_prompt +f\"\\nRelevant context from your memory:{formated_relevant_memories}\"\n\n prompt = (\n \"#### Recommended List #### \\n\"\n + f\"PAGE {current_page}\\n\"\n +recommended_items_str\n +\"\\nPlease respond to all the movies in the ## Recommended List ## and provide explanations.\"\n +\"\\nFirstly, determine which movies align with your taste and which do not, and provide reasons. You must respond to all the recommended movies using this format:\"\n +\"\\nMOVIE: [movie name]; ALIGN: [yes or no]; REASON: [brief reason]\"\n +\"\\nSecondly, among the movies that align with your tastes, decide the number of movies you want to watch based on your activity and diversity traits. Use this format:\"\n +\"\\nNUM: [number of movie you choose to watch]; WATCH: [all movie name you choose to watch]; REASON: [brief reason];\"\n +\"\\nThirdly, assume it's your first time watching the movies you've chosen, and rate them on a scale of 1-5 to reflect different degrees of liking, considering your feeling and conformity trait. Use this format:\"\n +\"\\n MOVIE:[movie you choose to watch]; RATING: [integer between 1-5]; FEELING: [aftermath sentence]; \"\n +\"\\n Do not include any additional information or explanations and stay grounded.\"\n )\n\n messages = [{\"role\": \"system\",\n \"content\": sys_prompt},\n {\"role\": \"user\",\n \"content\": prompt}]\n \n self.write_log(\"\\n\" + sys_prompt, color=\"blue\")\n self.write_log(\"\\n\" + prompt, color=\"blue\")\n reaction = self._reaction(messages, timeout=60) # reaction\n self.write_log(\"\\n\" + reaction, color=\"yellow\")\n\n # @ 2 Add user satisfaction information for this page.\n\n # =========================\n pattern1 = re.compile(r'MOVIE: (.+?); RATING: (\\d+); FEELING: (.*)')\n match1 = pattern1.findall(reaction)\n pattern2 = re.compile(r'MOVIE: (.+?); ALIGN: (.+?); REASON: (.*)')\n match2 = pattern2.findall(reaction)\n all_movies = \", \".join([movie_title.strip(';') for movie_title, align, reason in match2])\n watched_movies = [movie_title.strip(';') for movie_title, rating, feeling in match1]\n watched_movies_ratings = [rating.strip(';') for movie_title, rating, feeling in match1]\n like_movies = [movie_title.strip(';') for movie_title, rating, feeling in match1 if int(rating.strip(';')) == 5]\n dislike_movies = [movie_title.strip(';') for movie_title, rating, feeling in match1 if (int(rating.strip(';')) < 4)]\n dislike_movies.extend([movie_title.strip(';') for movie_title, align, reason in match2 if align.strip(';').lower() == 'no'])\n self.memory.add_memory(f\"The recommender recommended the following movies to me on page {current_page}: {all_movies}, among them, I watched {watched_movies} and rate them {watched_movies_ratings} respectively. 
I dislike the rest movies: {dislike_movies}.\"\n , now=datetime.datetime.now()\n )\n\n # User makes the next decision.\n next_decision = self.make_next_decision(current_page=current_page)\n if('[EXIT]' in next_decision or '[exit]' in next_decision):\n self.exit_flag = True\n self.memory.add_memory(f\"After browsing {current_page} pages, I decided to leave the recommendation system.\"\n , now=datetime.datetime.now())\n \n else:\n self.memory.add_memory(f\"Turn to page {current_page+1} of the recommendation.\"\n , now=datetime.datetime.now())\n #===========================\n\n return reaction\n\n def write_log(self, log, color=None, attrs=None, print=False):\n with open(self.log_file, 'a') as f:\n f.write(log + '\\n')\n f.flush()\n if(print):\n cprint(log, color=color, attrs=attrs)" }, { "identifier": "Arena", "path": "simulation/arena.py", "snippet": "class Arena(abstract_arena):\n def __init__(self, args):\n super().__init__(args)\n \n self.max_pages = args.max_pages\n self.finished_num = 0\n\n def load_additional_info(self):\n \n self.user_profile_csv = pd.read_csv(f'datasets/{self.dataset}/raw_data/agg_top_25.csv')\n\n # return super().load_additional_info()\n self.add_advert = self.args.add_advert\n self.display_advert = self.args.display_advert\n if(self.add_advert):\n self.total_adverts, self.clicked_adverts = 0, 0\n advert_pool = pd.read_pickle(f'datasets/{self.dataset}/simulation/advertisement_review.pkl')\n advert_dict = {'all': {**advert_pool['pop_high_rating'], **advert_pool['pop_low_rating'], **advert_pool['unpop_high_rating'], **advert_pool['unpop_low_rating']}, \n 'pop_high':advert_pool['pop_high_rating'], 'pop_low':advert_pool['pop_low_rating'], 'unpop_high':advert_pool['unpop_high_rating'], 'unpop_low':advert_pool['unpop_low_rating']}\n # print(self.args.advert_type)\n self.advert = advert_dict[self.args.advert_type]\n self.advert_word = \"The best movie you should not miss in your life! 
\"\n\n def initialize_all_avatars(self):\n \"\"\"\n initialize avatars\n \"\"\"\n super().initialize_all_avatars()\n # self.persona_df = pd.read_csv(f\"datasets/{self.dataset}/simulation/all_personas_like_information_house.csv\")\n self.persona_df = pd.read_csv(f\"datasets/{self.dataset}/simulation/all_personas_like_modify.csv\")\n self.user_statistic = pd.read_csv(f'datasets/{self.dataset}/simulation/user_statistic.csv', index_col=0)\n # @ avatars and evaluation indicators\n self.avatars = {}\n self.ratings = {}\n self.new_train_dict = {}\n self.exit_page = {}\n self.perf_per_page = {}\n self.watch = {}\n self.n_likes = {}\n self.remaining_users = list(range(self.n_avatars))\n\n for avatar_id in self.simulated_avatars_id:\n self.avatars[avatar_id] = Avatar(self.args, avatar_id, self.persona_df.loc[avatar_id], self.user_statistic.loc[avatar_id])\n self.new_train_dict[avatar_id] = self.data.train_user_list[avatar_id]\n self.ratings[avatar_id] = []\n self.n_likes[avatar_id] = []\n self.watch[avatar_id] = []\n self.exit_page[avatar_id] = 0\n self.perf_per_page[avatar_id] = []\n \n def page_generator(self, avatar_id):\n \"\"\"\n generate one page items for one avatar\n \"\"\"\n i = 0\n while (i+1)*self.items_per_page < self.data.n_items:\n yield self.full_rankings[avatar_id][i*self.items_per_page:(i+1)*self.items_per_page]\n i += 1\n\n def validate_all_avatars(self):\n vars.global_start_time = time.time()\n print(\"global start time\", vars.global_start_time)\n self.precision_list = []\n self.recall_list = []\n self.accuracy_list = []\n self.f1_list = []\n self.start_time = time.time()\n\n import asyncio\n from concurrent.futures import ThreadPoolExecutor\n loop = asyncio.get_event_loop()\n executor = ThreadPoolExecutor(max_workers=100)\n tasks = []\n\n t1 = time.time()\n for avatar_id in self.simulated_avatars_id:\n tasks.append(self.async_validate_one_avatar(avatar_id, loop, executor))\n loop.run_until_complete(asyncio.wait(tasks))\n t2 = time.time()\n print(f\"Time cost: {t2-t1}s\")\n\n print(\"precision_list\", self.precision_list)\n print(\"recall_list\", self.recall_list)\n print(\"accuracy_list\", self.accuracy_list)\n print(\"f1_list\", self.f1_list)\n\n with open(self.storage_base_path + \"/validation_metrics.txt\", 'w') as f:\n f.write(f\"Total simulation time: {round(time.time() - self.start_time, 2)}s\\n\")\n f.write(f\"n_avatars: {self.n_avatars}\\n\")\n f.write(f\"Average precision: {np.mean(self.precision_list)}\\n\")\n f.write(f\"Average recall: {np.mean(self.recall_list)}\\n\")\n f.write(f\"Average accuracy: {np.mean(self.accuracy_list)}\\n\")\n f.write(f\"Average f1: {np.mean(self.f1_list)}\\n\")\n\n async def async_validate_one_avatar(self, avatar_id, loop, executor):\n \"\"\"\n async\n validate the effectiveness of the model for one avatar\n avatar_id: the id of the simulated avatar\n \"\"\"\n avatar_ = self.avatars[avatar_id]\n train_list, val_list, test_list = self.data.train_user_list[avatar_id], self.data.valid_user_list[avatar_id], self.data.test_user_list[avatar_id]\n\n # Take the union for calculating precision.\n all_items = list(range(self.data.n_items))\n observed_items = list(set(train_list) | set(val_list) | set(test_list))\n selection_candidates = list(set(val_list) | set(test_list))\n unobserved_items = list(set(all_items) - set(observed_items))\n # Pick 5 randomly from the test_list.\n min_val = min(len(selection_candidates), 20//(self.val_ratio+1))\n print(len(selection_candidates), 10)\n\n test_observed_items = np.random.choice(selection_candidates, 
min_val, replace=False)\n test_unobserved_items = np.random.choice(unobserved_items, int(min_val*self.val_ratio), replace=False)\n\n print(\"test_all\", test_observed_items, test_unobserved_items)\n\n forced_items_ids = np.concatenate((test_observed_items, test_unobserved_items))\n # Randomly shuffle.\n np.random.shuffle(forced_items_ids)\n\n print(\"forced_items_ids\", forced_items_ids)\n\n forced_items = [self.movie_detail.loc[idx] for idx in forced_items_ids]\n\n truth_tmp = [self.movie_detail.loc[idx] for idx in test_observed_items]\n truth_list = [\"<- \" + item.title + \" ->\" \n + \" <- History ratings:\" + str(round(item.rating, 2)) + \" ->\" \n + \" <- Summary:\" + item.summary + \" ->\" + \"\\n\"\n for item in truth_tmp]\n truth_str = ''.join(truth_list)\n cprint(truth_str, color='white', attrs=['bold'])\n\n recommended_items = [\"<- \" + item.title + \" ->\" \n + \" <- History ratings:\" + str(round(item.rating, 2)) + \" ->\" \n + \" <- Summary:\" + item.summary + \" ->\" + \"\\n\"\n for item in forced_items]\n recommended_items_str = ''.join(recommended_items)\n\n response = await loop.run_in_executor(executor, avatar_.reaction_to_forced_items, recommended_items_str)\n\n cprint(response, color='yellow', attrs=None)\n\n pattern = re.compile(r'MOVIE:\\s*(.*?)\\s* WATCH:\\s*(.*?)\\s* REASON:\\s*(.*?)\\s*')\n matches = re.findall(pattern, response)\n # watched_movies = [(movie_title.strip(';')) for movie_title, watch, reason in matches if (watch.strip(';') == 'yes')]\n like_movies = [(idx, movie_title.strip(';')) for idx, (movie_title, watch, reason) in enumerate(matches[:len(forced_items)]) if (watch.strip(';') == 'yes' or watch.strip(';') == 'Yes')]\n print(\"like_movies\", like_movies)\n like_movies_ids = [forced_items_ids[idx] for idx, movie_title in like_movies]\n\n pred = np.array([1 if idx in like_movies_ids else 0 for idx in forced_items_ids])\n true = np.array([1 if idx in test_observed_items else 0 for idx in forced_items_ids])\n\n # Calculate precision.\n precision = get_precision(true, pred)\n print(\"precision\", precision)\n # Calculate recall.\n recall = get_recall(true, pred)\n print(\"recall\", recall)\n accuracy = get_accuracy(true, pred)\n print(\"accuracy\", accuracy)\n f1 = get_f1(true, pred)\n print(\"f1\", f1)\n\n self.precision_list.append(precision)\n self.recall_list.append(recall)\n self.accuracy_list.append(accuracy)\n self.f1_list.append(f1)\n\n vars.global_finished_users += 1\n\n def simulate_all_avatars(self):\n \"\"\"\n excute the simulation for all avatars\n \"\"\"\n vars.global_start_time = time.time()\n print(\"global start time\", vars.global_start_time)\n self.start_time = time.time()\n if(self.execution_mode == 'serial'):\n t1 = time.time()\n for avatar_id in self.simulated_avatars_id:\n self.simulate_one_avatar(avatar_id)\n t2 = time.time()\n print(f\"Time cost: {t2-t1}s\")\n\n elif(self.execution_mode == 'parallel'):\n import asyncio\n from concurrent.futures import ThreadPoolExecutor\n loop = asyncio.get_event_loop()\n executor = ThreadPoolExecutor(max_workers=500)\n tasks = []\n\n t1 = time.time()\n for avatar_id in self.simulated_avatars_id:\n tasks.append(self.async_simulate_one_avatar(avatar_id, loop, executor))\n loop.run_until_complete(asyncio.wait(tasks))\n t2 = time.time()\n print(f\"Time cost: {t2-t1}s\")\n\n async def async_simulate_one_avatar(self, avatar_id, loop, executor):\n \"\"\"\n async\n excute the simulation for one avatar\n avatar_id: the id of the simulated avatar\n \"\"\"\n start_time = time.time()\n time_local = 
time.localtime(start_time)\n l_start = time.strftime(\"%Y-%m-%d %H:%M:%S\",time_local)\n with open(self.storage_base_path + \"/system_log.txt\", 'a') as f:\n f.write(f\"Start: {l_start}. User {avatar_id} starts simulation.\\n\")\n\n avatar_ = self.avatars[avatar_id]\n avatar_.write_log(f\"Is simulating avatar {avatar_id}\")\n avatar_.exit_flag = False\n page_generator = self.page_generator(avatar_id)\n i = 0\n user_behavior_dict = {}\n user_interview_dict = {}\n while not avatar_.exit_flag:\n i += 1\n id_on_page = next(page_generator, []) # get the next page, a list of item ids\n if(len(id_on_page) == 0):\n break\n movies_on_page = [self.movie_detail.loc[idx] for idx in id_on_page] # movie_detail.csv\n recommended_items = [\"<- \" + item.title + \" ->\" \n # + \" <- Genres: \" + (',').join(list(item.genres.split('|'))) + \" ->\"\n + \" <- History ratings: \" + str(round(item.rating,2)) + \" ->\" \n + \" <- Summary: \" + item.summary + \" ->\" + \"\\n\"\n for item in movies_on_page]\n \n if(self.add_advert):\n #store_path = op.join(f\"storage/{self.dataset}/{self.modeltype}/{self.simulation_name}/adver_id\", f\"avatar{avatar_id}_{i}.txt\")\n store_path = f\"storage/{self.dataset}/{self.modeltype}/{self.simulation_name}/adver_id\"\n if not os.path.exists(store_path):\n os.makedirs(store_path)\n if not self.display_advert:\n recommended_items[0], id_on_page, movies_on_page = self.display_only_adver_item(store_path, avatar_id, i, id_on_page, movies_on_page)\n else:\n recommended_items[0], id_on_page, movies_on_page = self.display_item_with_adver(store_path, avatar_id, i, id_on_page, movies_on_page)\n\n\n recommended_items_str = ''.join(recommended_items)\n print(recommended_items_str)\n\n # Please write down the recommended information.\n avatar_.write_log(f\"\\n============= Recommendation Page {i} =============\")\n for idx, movie in enumerate(movies_on_page):\n if(id_on_page[idx] in self.data.valid_user_list[avatar_id]):\n avatar_.write_log(f\"== (√) {movie.title} History ratings: {round(movie.rating,2)} Summary: {movie.summary}\", \"blue\", attrs=[\"bold\"])\n else:\n avatar_.write_log(f\"== {movie.title} History ratings: {round(movie.rating,2)} Summary: {movie.summary}\")\n avatar_.write_log(f\"============= End Page {i} =============\\n\")\n\n # As a translator, I will translate the Chinese sentence you sent me into English. 
I do not need to understand the meaning of the content to provide a response.\n avatar_.write_log(f\"\\n============== Avatar {avatar_.avatar_id} Response {i} =============\")\n\n\n # @ most important Waiting for user response.\n response = await loop.run_in_executor(executor, avatar_.reaction_to_recommended_items, recommended_items_str, i)\n\n #==============================================\n # @ View user's favorite items\n #pattern = re.compile(r'MOVIE:\\s*(.*?)\\s*WATCH:\\s*(.*?)\\s*REASON:\\s*(.*?)\\s*FEELING:\\s*(.*?)\\s*RATING:\\s*(\\d)')\n ################################################################################################################\n # pattern = re.compile(r'MOVIE:\\s*(.*?)\\s*WATCH:\\s*(.*?)\\s*REASON:\\s*(.*?)\\s*RATING:\\s*(.*?)\\s*FEELING:(.*?)')\n # matches = re.findall(pattern, response)\n pattern1 = re.compile(r'MOVIE: (.+?); RATING: (\\d+); FEELING: (.*)')\n match1 = pattern1.findall(response)\n pattern2 = re.compile(r'MOVIE: (.+?); ALIGN: (.+?); REASON: (.*)')\n match2 = pattern2.findall(response)\n \n # pattern_interview = re.compile(r'RATING:\\s*(.*?)\\s*REASON:\\s*(.*?)')\n # matches_interview = re.findall(pattern_interview, interview_response)\n\n if(self.add_advert):\n if(match2[0][1].strip(';') == 'yes'):\n self.clicked_adverts += 1\n \n watched_movies = [movie_title.strip(';') for movie_title, rating, feeling in match1]\n watched_movies_contain_id = [(idx, movie_title.strip(';'), feeling.strip(';')) for idx, (movie_title, rating, feeling) in enumerate(match1[:self.items_per_page])]\n # 5 points means the movie is liked by the user.\n like_movies = [(idx, movie_title.strip(';'), feeling.strip(';')) for idx, (movie_title, rating, feeling) in enumerate(match1[:self.items_per_page]) if int(rating.strip(';')) == 5]\n align_movies = [(idx, movie_title.strip(';'), reason.strip(';')) for idx, (movie_title, align, reason) in enumerate(match2[:self.items_per_page]) if (align.strip(';') == 'Yes' or align.strip(';') == 'yes')]\n\n\n info_on_page = {}\n info_on_page['page'] = i\n info_on_page['ground_truth'] = [id_on_page[idx] for idx, movie in enumerate(movies_on_page) if id_on_page[idx] in self.data.valid_user_list[avatar_id]]\n info_on_page['recommended_id'] = id_on_page\n info_on_page['recommended'] = [self.movie_detail['title'][idx] for idx in id_on_page]\n info_on_page['align_id'] = [id_on_page[idx] for idx, movie, reason in align_movies]\n info_on_page['like_id'] = [id_on_page[idx] for idx, movie, feeling in like_movies]\n info_on_page['watch_id'] = [id_on_page[idx] for idx, movie, feeling in watched_movies_contain_id]\n info_on_page['watched'] = watched_movies\n info_on_page['rating_id'] = [id_on_page[idx] for idx, (movie_title, rating, feeling) in enumerate(match1[:self.items_per_page])]\n info_on_page['rating'] = [int(rating.strip(';')) for movie_title, rating, feeling in match1]\n #info_on_page['reason'] = [reason.strip(';') for movie_title, rating, feeling in match1]\n info_on_page['feeling'] = [feeling.strip(';') for movie_title, rating, feeling in match1]\n user_behavior_dict[i] = info_on_page\n\n # @ Add new training data.\n # new_train = [id_on_page[idx] for idx, movie, reason in like_movies] # Add all liked item ids in the validation set to the training set.\n tmp = [(idx, movie_title.strip(';'), feeling.strip(';')) for idx, (movie_title, rating, feeling) in enumerate(match1[:self.items_per_page])]\n new_train = [id_on_page[idx] for idx, movie, reason in tmp]\n self.new_train_dict[avatar_id].extend(new_train)\n\n # @ Record the average number 
of likes.\n self.n_likes[avatar_id].append(len(new_train))\n # ratings = re.findall(r'RATING: (\\d+)', response)\n ratings = re.findall(r'RATING: (\\d+);', response)\n average_rating = sum([int(rating.strip(';')) for rating in ratings])/max(len(watched_movies), 1)\n # Add the average score of this page.\n self.ratings[avatar_id].append(average_rating)\n self.watch[avatar_id].extend([movie for movie in watched_movies])\n\n # @ Calculate the precision on this page and save it.\n ground_truth = [id_on_page[idx] for idx, movie in enumerate(movies_on_page) if id_on_page[idx] in self.data.valid_user_list[avatar_id]]\n # print(like_movies, ground_truth)\n perf = (len(set(new_train) & set(ground_truth)), len(new_train), len(ground_truth))\n self.perf_per_page[avatar_id].append(perf)\n #==============================================\n\n vars.global_finished_pages += 1\n\n # @ Force exit if the number of pages exceeds the maximum limit.\n if(i >= self.max_pages):\n avatar_.exit_flag = True\n \n interview_response = avatar_.response_to_question(\"Do you feel satisfied with the recommender system you have just interacted? Rate this recommender system from 1-10 and give explanation.\\n Please use this respond format: RATING: [integer between 1 and 10]; REASON: [explanation]; In RATING part just give your rating and other reason and explanation should included in the REASON part.\", remember=False)\n # Extract RAING and REASON using re.\n pattern_interview = re.compile(r'RATING:\\s*(.*?)\\s*REASON:\\s*(.*?)')\n # pattern_interview = re.compile(r'RATING:\\s*(.*?)\\s*REASON:\\s*(.*?)')\n #pattern = re.compile(r'MOVIE:\\s*(.*?)\\s*WATCH:\\s*(.*?)\\s*REASON:\\s*(.*?)\\s*RATING:\\s*(.*?)\\s*FEELING:(.*?)')\n matches_interview = re.findall(r'(?<=RATING:|REASON:).*', interview_response)\n user_interview_dict['interview'] = matches_interview\n print(matches_interview)\n self.exit_page[avatar_id] = i\n self.finished_num += 1\n self.remaining_users.remove(avatar_id)\n remaining = \", \".join([str(u) for u in self.remaining_users])\n\n end_time = time.time()\n time_local = time.localtime(end_time)\n l_end = time.strftime(\"%Y-%m-%d %H:%M:%S\",time_local)\n vars.global_finished_users += 1\n with open(self.storage_base_path + \"/system_log.txt\", 'a') as f:\n f.write(f\"Start: {l_start} End: {l_end}. User {avatar_id} finished after {i} pages. [{self.finished_num} / {self.n_avatars}]. Total token cost: {round(self.avatars[avatar_id].memory.user_k_tokens, 2)}k. 
Taking {round(time.time() - start_time, 2)}s\\n\")\n f.write(f\"Remaining users: {remaining}\\n\")\n\n # @ Save the behavior of each individual.\n behavior_path = self.storage_base_path+ \"/behavior\"\n if not os.path.exists(behavior_path):\n os.makedirs(behavior_path)\n with open(behavior_path + f\"/{avatar_id}.pkl\", 'wb') as f:\n pickle.dump(user_behavior_dict, f)\n\n interview_path = self.storage_base_path+ \"/interview\"\n if not os.path.exists(interview_path):\n os.makedirs(interview_path)\n with open(interview_path + f\"/{avatar_id}.pkl\", 'wb') as f:\n pickle.dump(user_interview_dict, f)\n\n def simulate_one_avatar(self, avatar_id):\n \"\"\"\n excute the simulation for one avatar\n avatar_id: the id of the simulated avatar\n \"\"\"\n # print(\"\\nIs simulating avatar {}\".format(avatar_id))\n avatar_ = self.avatars[avatar_id]\n avatar_.write_log(f\"Is simulating avatar {avatar_id}\")\n avatar_.exit_flag = False\n page_generator = self.page_generator(avatar_id)\n while not avatar_.exit_flag:\n # for i in range(2):\n id_on_page = next(page_generator, []) # get the next page, a list of item ids\n if(len(id_on_page) == 0):\n break\n\n movies_on_page = [self.movie_detail[idx] for idx in id_on_page]\n avatar_.write_log(\"============= Recommendation Page =============\")\n for idx, movie in enumerate(movies_on_page):\n if(id_on_page[idx] in self.data.valid_user_list[avatar_id]):\n avatar_.write_log(f\"== {movie} (√)\", \"blue\", attrs=[\"bold\"])\n else:\n avatar_.write_log(f\"== {movie}\")\n avatar_.write_log(\"============= End Page =============\")\n avatar_.write_log(\"\")\n \n #@ most important\n response = avatar_.reaction_to_recommended_items(movies_on_page)\n\n avatar_.write_log(\"\")\n avatar_.write_log(\"============= Avatar Response =============\")\n avatar_.write_log(response, color='yellow', attrs=None)\n \n def parse_response(self, response):\n #pattern = re.compile(r'MOVIE:\\s*(.*?)\\s*WATCH:\\s*(.*?)\\s*REASON:\\s*(.*?)\\s*FEELING:\\s*(.*?)\\s*RATING:\\s*(\\d)')\n pattern = re.compile(r'MOVIE:\\s*(.*?)\\s*WATCH:\\s*(.*?)\\s*REASON:\\s*(.*?)\\s*RATING:\\s*(.*?)\\s*FEELING:(.*?)')\n matches = re.findall(pattern, response)\n\n watched_movies, watched_movies_contain_id = [], []\n\n for idx, (movie_title, watch, reason, rating, feeling) in enumerate(matches):\n if(self.add_advert and idx == 0 and watch.strip(';') == 'yes'): # If the first one has an advertisement and the user clicked on it.\n self.clicked_adverts += 1\n if(watch.strip(';') == 'yes'):\n watched_movies.append(movie_title.strip(';'))\n print(movie_title, watch, reason, rating, feeling)\n return response\n\n def display_only_adver_item(self, store_path, avatar_id, i, id_on_page, movies_on_page):\n store_path = op.join(store_path, f\"avatar{avatar_id}_{i}.txt\")\n try:\n with open(store_path, 'r') as f1:\n random_key = int(f1.read())\n except:\n try:\n store_path_minus_1 = op.join(store_path, f\"avatar{avatar_id}_{i-1}.txt\")\n with open(store_path_minus_1, 'r') as f2:\n random_key = int(f2.read())\n except:\n store_path_minus_2 = op.join(store_path, f\"avatar{avatar_id}_{i-2}.txt\")\n with open(store_path_minus_2, 'r') as f3:\n random_key = int(f3.read())\n try:\n store_path_minus_3 = op.join(store_path, f\"avatar{avatar_id}_{i-3}.txt\")\n with open(store_path_minus_3, 'r') as f4:\n random_key = int(f4.read())\n except:\n store_path_minus_4 = op.join(store_path, f\"avatar{avatar_id}_{i-4}.txt\")\n with open(store_path_minus_4, 'r') as f5:\n random_key = int(f5.read())\n\n\n self.total_adverts += 1\n id_on_page[0] 
= random_key\n movies_on_page[0] = self.movie_detail.loc[random_key]\n adver_information = self.advert[random_key]\n\n return ( \"<- \" + adver_information['title'] + \" ->\" \n + \" <- History ratings:\" + str(round(adver_information['rating'], 2)) + \" ->\"\n + \" <- Summary:\" + adver_information['summary'] + \" ->\" + \"\\n\"), id_on_page, movies_on_page\n\n def display_item_with_adver(self, store_path, avatar_id, i, id_on_page, movies_on_page):\n store_path = op.join(store_path, f\"avatar{avatar_id}_{i}.txt\")\n random_key = np.random.choice(list(self.advert.keys()))\n self.total_adverts += 1\n random_advert = self.advert[random_key]\n id_on_page[0] = random_key\n movies_on_page[0] = self.movie_detail.loc[random_key]\n advert_item_id = random_key\n\n with open(store_path, 'w') as f:\n f.write(f\"{advert_item_id}\")\n \n return ( self.advert_word \n + \"<- \" + random_advert['title'] + \" ->\" \n + \"<- \" + random_advert['review'] + \" ->\"\n + \" <- History ratings:\" + str(round(random_advert['rating'], 2)) + \" ->\" \n + \" <- Summary:\" + random_advert['summary'] + \" ->\" + \"\\n\"), id_on_page, movies_on_page\n\n def save_results(self):\n \"\"\"\n save the results of the simulation\n \"\"\"\n # if(self.n_avatars == self.data.n_users):\n def save_user_dict_to_txt(user_dict, base_path, filename):\n with open(base_path + filename, 'w') as f:\n for u, v in user_dict.items():\n f.write(str(int(u)))\n for i in v:\n f.write(' ' + str(int(i)))\n f.write('\\n')\n\n # save_path = f\"datasets/{self.dataset}_{self.modeltype}/cf_data/\"\n save_path = f\"storage/{self.dataset}/{self.modeltype}/{self.simulation_name}/\"\n save_user_dict_to_txt(self.new_train_dict, save_path, 'train.txt')\n\n # @ Save overall evaluation indicators.\n # Average number of clicks per user\n cprint(\"Number of likes\", color='green', attrs=['bold'])\n cprint(self.n_likes, color='green', attrs=['bold'])\n average_n_likes = {avatar_id:np.mean(n_likes) for avatar_id, n_likes in self.n_likes.items()}\n cprint(average_n_likes, color='green', attrs=['bold'])\n\n overall_n_likes = np.mean(list(average_n_likes.values()))\n cprint(f\"\\nOverall number of likes: {overall_n_likes}\", color='green', attrs=['bold'])\n\n # Average satisfaction\n cprint(\"\\nRatings\", color='green', attrs=['bold'])\n cprint(self.ratings, color='green', attrs=['bold'])\n average_ratings = {avatar_id:np.mean(ratings) for avatar_id, ratings in self.ratings.items()}\n cprint(average_ratings, color='green', attrs=['bold'])\n\n # @ Save average click-through rate\n average_click_rate = {avatar_id:len(movies)/(self.max_pages*self.items_per_page) for avatar_id, movies in self.watch.items()}\n cprint(f\"\\nAverage click rate: {average_click_rate}\", color='green', attrs=['bold'])\n overall_click_rate = np.mean(list(average_click_rate.values()))\n cprint(f\"\\nOverall satisfaction: {overall_click_rate}\", color='green', attrs=['bold']) # Average click-through rate\n\n # overall_click_rate = np.mean(list(average_ratings.values()))\n # cprint(f\"\\nOverall satisfaction: {overall_click_rate}\", color='green', attrs=['bold'])\n\n # Average exit page\n mean_exit_page = np.mean(list(self.exit_page.values()))\n cprint(\"\\nExit pages\", color='green', attrs=['bold'])\n cprint(self.exit_page, color='green', attrs=['bold'])\n cprint(f\"Average exit page: {mean_exit_page}\", color='green', attrs=['bold'])\n\n # Average precision and recall\n cprint(\"\\nPrecision and recall\", color='green', attrs=['bold'])\n cprint(self.perf_per_page, color=\"green\", 
attrs=['bold'])\n total_perf = {avatar_id:[sum([i for i, j, k in perf_per_page]), sum([j for i, j, k in perf_per_page]), sum([k for i, j, k in perf_per_page])] for avatar_id, perf_per_page in self.perf_per_page.items()}\n total_recall_precision = {avatar_id:(perf[0]/max(perf[1], 1), perf[0]/max(perf[2], 1)) for avatar_id, perf in total_perf.items()}\n cprint(total_perf, color=\"green\", attrs=['bold'])\n cprint(total_recall_precision, color=\"green\", attrs=['bold'])\n average_precision = np.mean([metrics[0] for avatar_id, metrics in total_recall_precision.items()])\n average_recall = np.mean([metrics[1] for avatar_id, metrics in total_recall_precision.items()])\n cprint(f\"Precision: {average_precision} Recall: {average_recall}\", color=\"green\", attrs=['bold'])\n # metrics_path = self.storage_base_path + \"/metrics.txt\"\n total_k_tokens = sum([self.avatars[i].memory.user_k_tokens for i in range(self.n_avatars)])\n\n # Effective advertising rate\n if(self.add_advert):\n cprint(\"\\nAdvert\", color='green', attrs=['bold'])\n cprint(f\"Total advert: {self.total_adverts}\", color='green', attrs=['bold'])\n cprint(f\"Clicked advert: {self.clicked_adverts}\", color='green', attrs=['bold'])\n cprint(f\"Advert click rate: {self.clicked_adverts/self.total_adverts}\", color='green', attrs=['bold'])\n\n end_time = time.strftime(\"%Y-%m-%d %H:%M:%S\",time.localtime(time.time()))\n with open(self.storage_base_path + \"/metrics.txt\", 'w') as f:\n f.write(f\"Finished time: {end_time}\\n\")\n f.write(f\"Total simulation time: {round(time.time() - self.start_time, 2)}s\\n\")\n f.write(f\"n_avatars: {self.n_avatars}\\n\")\n f.write(f\"Average recall: {average_recall}\\n\")\n f.write(f\"Average presion: {average_precision}\\n\")\n f.write(f\"Total k tokens: {round(total_k_tokens, 2)}k tokens\\n\")\n f.write(f\"Total cost: {round(total_k_tokens*0.0018, 2)} \\n\")\n # f.write(f\"Average precision: {}\")\n f.write(f\"Maximum exit page: {self.max_pages}\\n\")\n f.write(f\"Overall click rate: {overall_click_rate}\\n\")\n f.write(f\"Average number of likes: {overall_n_likes}\\n\")\n f.write(f\"Average exit page: {mean_exit_page}\\n\")\n if(self.add_advert):\n f.write(f\"Total advert: {self.total_adverts}\\n\")\n f.write(f\"Clicked advert: {self.clicked_adverts}\\n\")\n f.write(f\"Advert click rate: {self.clicked_adverts/self.total_adverts}\\n\")" } ]
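The validation path in Arena.async_validate_one_avatar above reports per-avatar precision, recall, accuracy, and F1 through get_precision, get_recall, get_accuracy, and get_f1, which are not included in this excerpt. A plausible minimal sketch of such helpers over binary numpy label arrays follows; the signatures mirror the call sites (true first, then pred), but this is an assumed implementation, not the repository's actual one:

import numpy as np

def get_precision(true: np.ndarray, pred: np.ndarray) -> float:
    tp = np.sum((pred == 1) & (true == 1))
    return float(tp / max(np.sum(pred == 1), 1))

def get_recall(true: np.ndarray, pred: np.ndarray) -> float:
    tp = np.sum((pred == 1) & (true == 1))
    return float(tp / max(np.sum(true == 1), 1))

def get_accuracy(true: np.ndarray, pred: np.ndarray) -> float:
    return float(np.mean(true == pred))

def get_f1(true: np.ndarray, pred: np.ndarray) -> float:
    p, r = get_precision(true, pred), get_recall(true, pred)
    return float(2 * p * r / max(p + r, 1e-8))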
import numpy as np import os import wandb from tqdm import tqdm from parse import parse_args from simulation.utils import fix_seeds from simulation.avatar import Avatar from simulation.arena import Arena
12,740
# load model if __name__ == '__main__': args = parse_args() # print(args) fix_seeds(args.seed) # set random seed if(args.use_wandb): wandb.init( # set the wandb project where this run will be logged project = "sandbox", name = args.simulation_name, group = args.dataset )
# load model if __name__ == '__main__': args = parse_args() # print(args) fix_seeds(args.seed) # set random seed if(args.use_wandb): wandb.init( # set the wandb project where this run will be logged project = "sandbox", name = args.simulation_name, group = args.dataset )
arena_ = Arena(args)
3
2023-10-12 02:33:22+00:00
16k
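The save_results routine in the record above folds per-page performance triples into per-avatar totals before averaging, guarding every division with max(..., 1). The standalone sketch below mirrors only that aggregation step; the avatar IDs and counts are hypothetical placeholders, and the exact meaning of the second and third counters (retrieved vs. relevant items) is defined upstream in the simulator and is assumed here rather than taken from the dataset.

import numpy as np

# Hypothetical per-avatar lists of (hits, counter_a, counter_b) triples, one per page.
perf_per_page = {
    "avatar_0": [(2, 5, 10), (1, 5, 10)],
    "avatar_1": [(0, 5, 10), (3, 5, 10)],
}

# Fold page-level triples into one total per avatar, as in total_perf above.
total_perf = {a: [sum(t[0] for t in pages), sum(t[1] for t in pages), sum(t[2] for t in pages)]
              for a, pages in perf_per_page.items()}

# Ratio of hits to each counter, with max(..., 1) protecting against empty denominators.
total_recall_precision = {a: (p[0] / max(p[1], 1), p[0] / max(p[2], 1))
                          for a, p in total_perf.items()}

average_precision = np.mean([m[0] for m in total_recall_precision.values()])
average_recall = np.mean([m[1] for m in total_recall_precision.values()])
print(total_recall_precision, round(float(average_precision), 3), round(float(average_recall), 3))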
Beckschen/3D-TransUNet
nn_transunet/trainer/network_trainer.py
[ { "identifier": "SegmentationNetwork", "path": "nn_transunet/networks/neural_network.py", "snippet": "class SegmentationNetwork(NeuralNetwork):\n def __init__(self):\n super(NeuralNetwork, self).__init__()\n\n # if we have 5 pooling then our patch size must be divisible by 2**5\n # for example in a 2d network that does 5 pool in x and 6 pool\n self.input_shape_must_be_divisible_by = None\n # in y this would be (32, 64)\n\n # we need to know this because we need to know if we are a 2d or a 3d netowrk\n self.conv_op = None # nn.Conv2d or nn.Conv3d\n\n # this tells us how many channely we have in the output. Important for preallocation in inference\n self.num_classes = None # number of channels in the output\n\n # depending on the loss, we do not hard code a nonlinearity into the architecture. To aggregate predictions\n # during inference, we need to apply the nonlinearity, however. So it is important to let the newtork know what\n # to apply in inference. For the most part this will be softmax\n self.inference_apply_nonlin = lambda x: x # softmax_helper\n\n # This is for saving a gaussian importance map for inference. It weights voxels higher that are closer to the\n # center. Prediction at the borders are often less accurate and are thus downweighted. Creating these Gaussians\n # can be expensive, so it makes sense to save and reuse them.\n self._gaussian_3d = self._patch_size_for_gaussian_3d = None\n self._gaussian_2d = self._patch_size_for_gaussian_2d = None\n\n def predict_3D(self, x: np.ndarray, do_mirroring: bool, mirror_axes: Tuple[int, ...] = (0, 1, 2),\n use_sliding_window: bool = False,\n step_size: float = 0.5, patch_size: Tuple[int, ...] = None, regions_class_order: Tuple[int, ...] = None,\n use_gaussian: bool = False, pad_border_mode: str = \"constant\",\n pad_kwargs: dict = None, all_in_gpu: bool = False,\n verbose: bool = True, mixed_precision: bool = True) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Use this function to predict a 3D image. It does not matter whether the network is a 2D or 3D U-Net, it will\n detect that automatically and run the appropriate code.\n When running predictions, you need to specify whether you want to run fully convolutional of sliding window\n based inference. We very strongly recommend you use sliding window with the default settings.\n It is the responsibility of the user to make sure the network is in the proper mode (eval for inference!). If\n the network is not in eval mode it will print a warning.\n :param x: Your input data. Must be a nd.ndarray of shape (c, x, y, z).\n :param do_mirroring: If True, use test time data augmentation in the form of mirroring\n :param mirror_axes: Determines which axes to use for mirroing. Per default, mirroring is done along all three\n axes\n :param use_sliding_window: if True, run sliding window prediction. Heavily recommended! This is also the default\n :param step_size: When running sliding window prediction, the step size determines the distance between adjacent\n predictions. The smaller the step size, the denser the predictions (and the longer it takes!). Step size is given\n as a fraction of the patch_size. 0.5 is the default and means that wen advance by patch_size * 0.5 between\n predictions. step_size cannot be larger than 1!\n :param patch_size: The patch size that was used for training the network. 
Do not use different patch sizes here,\n this will either crash or give potentially less accurate segmentations\n :param regions_class_order: Fabian only\n :param use_gaussian: (Only applies to sliding window prediction) If True, uses a Gaussian importance weighting\n to weigh predictions closer to the center of the current patch higher than those at the borders. The reason\n behind this is that the segmentation accuracy decreases towards the borders. Default (and recommended): True\n :param pad_border_mode: leave this alone\n :param pad_kwargs: leave this alone\n :param all_in_gpu: experimental. You probably want to leave this as is it\n :param verbose: Do you want a wall of text? If yes then set this to True\n :param mixed_precision: if True, will run inference in mixed precision with autocast()\n :return:\n \"\"\"\n torch.cuda.empty_cache()\n\n assert step_size <= 1, 'step_size must be smaller than 1. Otherwise there will be a gap between consecutive ' \\\n 'predictions'\n\n if verbose:\n print(\"debug: mirroring\", do_mirroring, \"mirror_axes\", mirror_axes)\n\n assert self.get_device() != \"cpu\", \"CPU not implemented\"\n\n if pad_kwargs is None:\n pad_kwargs = {'constant_values': 0}\n\n # A very long time ago the mirror axes were (2, 3, 4) for a 3d network. This is just to intercept any old\n # code that uses this convention\n if len(mirror_axes):\n if self.conv_op == nn.Conv2d:\n if max(mirror_axes) > 1:\n raise ValueError(\"mirror axes. duh\")\n if self.conv_op == nn.Conv3d:\n if max(mirror_axes) > 2:\n raise ValueError(\"mirror axes. duh\")\n\n if self.training:\n print(\n 'WARNING! Network is in train mode during inference. This may be intended, or not...')\n\n assert len(x.shape) == 4, \"data must have shape (c,x,y,z)\"\n\n if mixed_precision:\n context = autocast\n else:\n context = no_op\n\n with context():\n with torch.no_grad():\n if self.conv_op == nn.Conv3d:\n if use_sliding_window:\n res = self._internal_predict_3D_3Dconv_tiled(x, step_size, do_mirroring, mirror_axes, patch_size,\n regions_class_order, use_gaussian, pad_border_mode,\n pad_kwargs=pad_kwargs, all_in_gpu=all_in_gpu,\n verbose=verbose)\n else:\n res = self._internal_predict_3D_3Dconv(x, patch_size, do_mirroring, mirror_axes, regions_class_order,\n pad_border_mode, pad_kwargs=pad_kwargs, verbose=verbose)\n elif self.conv_op == nn.Conv2d:\n if use_sliding_window:\n res = self._internal_predict_3D_2Dconv_tiled(x, patch_size, do_mirroring, mirror_axes, step_size,\n regions_class_order, use_gaussian, pad_border_mode,\n pad_kwargs, all_in_gpu, False)\n else:\n res = self._internal_predict_3D_2Dconv(x, patch_size, do_mirroring, mirror_axes, regions_class_order,\n pad_border_mode, pad_kwargs, all_in_gpu, False)\n else:\n raise RuntimeError(\n \"Invalid conv op, cannot determine what dimensionality (2d/3d) the network is\")\n\n return res\n\n def predict_2D(self, x, do_mirroring: bool, mirror_axes: tuple = (0, 1, 2), use_sliding_window: bool = False,\n step_size: float = 0.5, patch_size: tuple = None, regions_class_order: tuple = None,\n use_gaussian: bool = False, pad_border_mode: str = \"constant\",\n pad_kwargs: dict = None, all_in_gpu: bool = False,\n verbose: bool = True, mixed_precision: bool = True) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Use this function to predict a 2D image. If this is a 3D U-Net it will crash because you cannot predict a 2D\n image with that (you dummy).\n When running predictions, you need to specify whether you want to run fully convolutional of sliding window\n based inference. 
We very strongly recommend you use sliding window with the default settings.\n It is the responsibility of the user to make sure the network is in the proper mode (eval for inference!). If\n the network is not in eval mode it will print a warning.\n :param x: Your input data. Must be a nd.ndarray of shape (c, x, y).\n :param do_mirroring: If True, use test time data augmentation in the form of mirroring\n :param mirror_axes: Determines which axes to use for mirroing. Per default, mirroring is done along all three\n axes\n :param use_sliding_window: if True, run sliding window prediction. Heavily recommended! This is also the default\n :param step_size: When running sliding window prediction, the step size determines the distance between adjacent\n predictions. The smaller the step size, the denser the predictions (and the longer it takes!). Step size is given\n as a fraction of the patch_size. 0.5 is the default and means that wen advance by patch_size * 0.5 between\n predictions. step_size cannot be larger than 1!\n :param patch_size: The patch size that was used for training the network. Do not use different patch sizes here,\n this will either crash or give potentially less accurate segmentations\n :param regions_class_order: Fabian only\n :param use_gaussian: (Only applies to sliding window prediction) If True, uses a Gaussian importance weighting\n to weigh predictions closer to the center of the current patch higher than those at the borders. The reason\n behind this is that the segmentation accuracy decreases towards the borders. Default (and recommended): True\n :param pad_border_mode: leave this alone\n :param pad_kwargs: leave this alone\n :param all_in_gpu: experimental. You probably want to leave this as is it\n :param verbose: Do you want a wall of text? If yes then set this to True\n :return:\n \"\"\"\n torch.cuda.empty_cache()\n\n assert step_size <= 1, 'step_size must be smaler than 1. Otherwise there will be a gap between consecutive ' \\\n 'predictions'\n\n if self.conv_op == nn.Conv3d:\n raise RuntimeError(\n \"Cannot predict 2d if the network is 3d. Dummy.\")\n\n if verbose:\n print(\"debug: mirroring\", do_mirroring, \"mirror_axes\", mirror_axes)\n\n assert self.get_device() != \"cpu\", \"CPU not implemented\"\n\n if pad_kwargs is None:\n pad_kwargs = {'constant_values': 0}\n\n # A very long time ago the mirror axes were (2, 3) for a 2d network. This is just to intercept any old\n # code that uses this convention\n if len(mirror_axes):\n if max(mirror_axes) > 1:\n raise ValueError(\"mirror axes. duh\")\n\n if self.training:\n print(\n 'WARNING! Network is in train mode during inference. This may be intended, or not...')\n\n assert len(x.shape) == 3, \"data must have shape (c,x,y)\"\n\n if mixed_precision:\n context = autocast\n else:\n context = no_op\n\n with context():\n with torch.no_grad():\n if self.conv_op == nn.Conv2d:\n if use_sliding_window:\n res = self._internal_predict_2D_2Dconv_tiled(x, step_size, do_mirroring, mirror_axes, patch_size,\n regions_class_order, use_gaussian, pad_border_mode,\n pad_kwargs, all_in_gpu, verbose)\n else:\n res = self._internal_predict_2D_2Dconv(x, patch_size, do_mirroring, mirror_axes, regions_class_order,\n pad_border_mode, pad_kwargs, verbose)\n else:\n raise RuntimeError(\n \"Invalid conv op, cannot determine what dimensionality (2d/3d) the network is\")\n\n return res\n\n @staticmethod\n def _get_gaussian(patch_size, sigma_scale=1. 
/ 8) -> np.ndarray:\n tmp = np.zeros(patch_size)\n center_coords = [i // 2 for i in patch_size]\n sigmas = [i * sigma_scale for i in patch_size]\n tmp[tuple(center_coords)] = 1\n gaussian_importance_map = gaussian_filter(\n tmp, sigmas, 0, mode='constant', cval=0)\n gaussian_importance_map = gaussian_importance_map / \\\n np.max(gaussian_importance_map) * 1\n gaussian_importance_map = gaussian_importance_map.astype(np.float32)\n\n # gaussian_importance_map cannot be 0, otherwise we may end up with nans!\n gaussian_importance_map[gaussian_importance_map == 0] = np.min(\n gaussian_importance_map[gaussian_importance_map != 0])\n\n return gaussian_importance_map\n\n @staticmethod\n def _compute_steps_for_sliding_window(patch_size: Tuple[int, ...], image_size: Tuple[int, ...], step_size: float) -> List[List[int]]:\n assert [i >= j for i, j in zip(\n image_size, patch_size)], \"image size must be as large or larger than patch_size\"\n assert 0 < step_size <= 1, 'step_size must be larger than 0 and smaller or equal to 1'\n\n # our step width is patch_size*step_size at most, but can be narrower. For example if we have image size of\n # 110, patch size of 64 and step_size of 0.5, then we want to make 3 steps starting at coordinate 0, 23, 46\n target_step_sizes_in_voxels = [i * step_size for i in patch_size]\n\n num_steps = [int(np.ceil((i - k) / j)) + 1 for i, j,\n k in zip(image_size, target_step_sizes_in_voxels, patch_size)]\n\n steps = []\n for dim in range(len(patch_size)):\n # the highest step value for this dimension is\n max_step_value = image_size[dim] - patch_size[dim]\n if num_steps[dim] > 1:\n actual_step_size = max_step_value / (num_steps[dim] - 1)\n else:\n # does not matter because there is only one step at 0\n actual_step_size = 99999999999\n\n steps_here = [int(np.round(actual_step_size * i))\n for i in range(num_steps[dim])]\n\n steps.append(steps_here)\n\n return steps\n\n def _internal_predict_3D_3Dconv_tiled(self, x: np.ndarray, step_size: float, do_mirroring: bool, mirror_axes: tuple,\n patch_size: tuple, regions_class_order: tuple, use_gaussian: bool,\n pad_border_mode: str, pad_kwargs: dict, all_in_gpu: bool,\n verbose: bool) -> Tuple[np.ndarray, np.ndarray]:\n # better safe than sorry\n assert len(x.shape) == 4, \"x must be (c, x, y, z)\"\n assert self.get_device() != \"cpu\"\n if verbose:\n print(\"step_size:\", step_size)\n if verbose:\n print(\"do mirror:\", do_mirroring)\n\n assert patch_size is not None, \"patch_size cannot be None for tiled prediction\"\n\n # for sliding window inference the image must at least be as large as the patch size. It does not matter\n # whether the shape is divisible by 2**num_pool as long as the patch size is\n data, slicer = pad_nd_image(\n x, patch_size, pad_border_mode, pad_kwargs, True, None)\n data_shape = data.shape # still c, x, y, z\n\n # compute the steps for sliding window\n steps = self._compute_steps_for_sliding_window(\n patch_size, data_shape[1:], step_size)\n num_tiles = len(steps[0]) * len(steps[1]) * len(steps[2])\n\n if verbose:\n print(\"data shape:\", data_shape)\n print(\"patch size:\", patch_size)\n print(\"steps (x, y, and z):\", steps)\n print(\"number of tiles:\", num_tiles)\n\n # we only need to compute that once. 
It can take a while to compute this due to the large sigma in\n # gaussian_filter\n if use_gaussian and num_tiles > 1:\n if self._gaussian_3d is None or not all(\n [i == j for i, j in zip(patch_size, self._patch_size_for_gaussian_3d)]):\n if verbose:\n print('computing Gaussian')\n gaussian_importance_map = self._get_gaussian(\n patch_size, sigma_scale=1. / 8)\n\n self._gaussian_3d = gaussian_importance_map\n self._patch_size_for_gaussian_3d = patch_size\n else:\n if verbose:\n print(\"using precomputed Gaussian\")\n gaussian_importance_map = self._gaussian_3d\n\n gaussian_importance_map = torch.from_numpy(gaussian_importance_map).cuda(self.get_device(),\n non_blocking=True)\n\n else:\n gaussian_importance_map = None\n\n if all_in_gpu:\n # If we run the inference in GPU only (meaning all tensors are allocated on the GPU, this reduces\n # CPU-GPU communication but required more GPU memory) we need to preallocate a few things on GPU\n\n if use_gaussian and num_tiles > 1:\n # half precision for the outputs should be good enough. If the outputs here are half, the\n # gaussian_importance_map should be as well\n gaussian_importance_map = gaussian_importance_map.half()\n\n # make sure we did not round anything to 0\n gaussian_importance_map[gaussian_importance_map == 0] = gaussian_importance_map[\n gaussian_importance_map != 0].min()\n\n add_for_nb_of_preds = gaussian_importance_map\n else:\n add_for_nb_of_preds = torch.ones(\n data.shape[1:], device=self.get_device())\n\n if verbose:\n print(\"initializing result array (on GPU)\")\n aggregated_results = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half,\n device=self.get_device())\n\n if verbose:\n print(\"moving data to GPU\")\n data = torch.from_numpy(data).cuda(\n self.get_device(), non_blocking=True)\n\n if verbose:\n print(\"initializing result_numsamples (on GPU)\")\n aggregated_nb_of_predictions = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half,\n device=self.get_device())\n else:\n if use_gaussian and num_tiles > 1:\n add_for_nb_of_preds = self._gaussian_3d\n else:\n add_for_nb_of_preds = np.ones(data.shape[1:], dtype=np.float32)\n aggregated_results = np.zeros(\n [self.num_classes] + list(data.shape[1:]), dtype=np.float32)\n aggregated_nb_of_predictions = np.zeros(\n [self.num_classes] + list(data.shape[1:]), dtype=np.float32)\n\n for x in steps[0]:\n lb_x = x\n ub_x = x + patch_size[0]\n for y in steps[1]:\n lb_y = y\n ub_y = y + patch_size[1]\n for z in steps[2]:\n lb_z = z\n ub_z = z + patch_size[2]\n\n predicted_patch = self._internal_maybe_mirror_and_pred_3D(\n data[None, :, lb_x:ub_x, lb_y:ub_y,\n lb_z:ub_z], mirror_axes, do_mirroring,\n gaussian_importance_map)[0]\n\n if all_in_gpu:\n predicted_patch = predicted_patch.half()\n else:\n predicted_patch = predicted_patch.cpu().numpy()\n\n aggregated_results[:, lb_x:ub_x,\n lb_y:ub_y, lb_z:ub_z] += predicted_patch\n aggregated_nb_of_predictions[:, lb_x:ub_x,\n lb_y:ub_y, lb_z:ub_z] += add_for_nb_of_preds\n\n # we reverse the padding here (remeber that we padded the input to be at least as large as the patch size\n slicer = tuple(\n [slice(0, aggregated_results.shape[i]) for i in\n range(len(aggregated_results.shape) - (len(slicer) - 1))] + slicer[1:])\n aggregated_results = aggregated_results[slicer]\n aggregated_nb_of_predictions = aggregated_nb_of_predictions[slicer]\n\n # computing the class_probabilities by dividing the aggregated result with result_numsamples\n class_probabilities = aggregated_results / 
aggregated_nb_of_predictions\n\n if regions_class_order is None:\n predicted_segmentation = class_probabilities.argmax(0)\n else:\n if all_in_gpu:\n class_probabilities_here = class_probabilities.detach().cpu().numpy()\n else:\n class_probabilities_here = class_probabilities\n predicted_segmentation = np.zeros(\n class_probabilities_here.shape[1:], dtype=np.float32)\n for i, c in enumerate(regions_class_order):\n predicted_segmentation[class_probabilities_here[i] > 0.5] = c\n\n if all_in_gpu:\n if verbose:\n print(\"copying results to CPU\")\n\n if regions_class_order is None:\n predicted_segmentation = predicted_segmentation.detach().cpu().numpy()\n\n class_probabilities = class_probabilities.detach().cpu().numpy()\n\n if verbose:\n print(\"prediction done\")\n return predicted_segmentation, class_probabilities\n\n def _internal_predict_2D_2Dconv(self, x: np.ndarray, min_size: Tuple[int, int], do_mirroring: bool,\n mirror_axes: tuple = (0, 1, 2), regions_class_order: tuple = None,\n pad_border_mode: str = \"constant\", pad_kwargs: dict = None,\n verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n This one does fully convolutional inference. No sliding window\n \"\"\"\n assert len(x.shape) == 3, \"x must be (c, x, y)\"\n assert self.get_device() != \"cpu\"\n assert self.input_shape_must_be_divisible_by is not None, 'input_shape_must_be_divisible_by must be set to ' \\\n 'run _internal_predict_2D_2Dconv'\n if verbose:\n print(\"do mirror:\", do_mirroring)\n\n data, slicer = pad_nd_image(x, min_size, pad_border_mode, pad_kwargs, True,\n self.input_shape_must_be_divisible_by)\n\n predicted_probabilities = self._internal_maybe_mirror_and_pred_2D(data[None], mirror_axes, do_mirroring,\n None)[0]\n\n slicer = tuple(\n [slice(0, predicted_probabilities.shape[i]) for i in range(len(predicted_probabilities.shape) -\n (len(slicer) - 1))] + slicer[1:])\n predicted_probabilities = predicted_probabilities[slicer]\n\n if regions_class_order is None:\n predicted_segmentation = predicted_probabilities.argmax(0)\n predicted_segmentation = predicted_segmentation.detach().cpu().numpy()\n predicted_probabilities = predicted_probabilities.detach().cpu().numpy()\n else:\n predicted_probabilities = predicted_probabilities.detach().cpu().numpy()\n predicted_segmentation = np.zeros(\n predicted_probabilities.shape[1:], dtype=np.float32)\n for i, c in enumerate(regions_class_order):\n predicted_segmentation[predicted_probabilities[i] > 0.5] = c\n\n return predicted_segmentation, predicted_probabilities\n\n def _internal_predict_3D_3Dconv(self, x: np.ndarray, min_size: Tuple[int, ...], do_mirroring: bool,\n mirror_axes: tuple = (0, 1, 2), regions_class_order: tuple = None,\n pad_border_mode: str = \"constant\", pad_kwargs: dict = None,\n verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n This one does fully convolutional inference. 
No sliding window\n \"\"\"\n assert len(x.shape) == 4, \"x must be (c, x, y, z)\"\n assert self.get_device() != \"cpu\"\n assert self.input_shape_must_be_divisible_by is not None, 'input_shape_must_be_divisible_by must be set to ' \\\n 'run _internal_predict_3D_3Dconv'\n if verbose:\n print(\"do mirror:\", do_mirroring)\n\n data, slicer = pad_nd_image(x, min_size, pad_border_mode, pad_kwargs, True,\n self.input_shape_must_be_divisible_by)\n\n predicted_probabilities = self._internal_maybe_mirror_and_pred_3D(data[None], mirror_axes, do_mirroring,\n None)[0]\n\n slicer = tuple(\n [slice(0, predicted_probabilities.shape[i]) for i in range(len(predicted_probabilities.shape) -\n (len(slicer) - 1))] + slicer[1:])\n predicted_probabilities = predicted_probabilities[slicer]\n\n if regions_class_order is None:\n predicted_segmentation = predicted_probabilities.argmax(0)\n predicted_segmentation = predicted_segmentation.detach().cpu().numpy()\n predicted_probabilities = predicted_probabilities.detach().cpu().numpy()\n else:\n predicted_probabilities = predicted_probabilities.detach().cpu().numpy()\n predicted_segmentation = np.zeros(\n predicted_probabilities.shape[1:], dtype=np.float32)\n for i, c in enumerate(regions_class_order):\n predicted_segmentation[predicted_probabilities[i] > 0.5] = c\n\n return predicted_segmentation, predicted_probabilities\n\n def _internal_maybe_mirror_and_pred_3D(self, x: Union[np.ndarray, torch.tensor], mirror_axes: tuple,\n do_mirroring: bool = True,\n mult: np.ndarray or torch.tensor = None) -> torch.tensor:\n assert len(x.shape) == 5, 'x must be (b, c, x, y, z)'\n # everything in here takes place on the GPU. If x and mult are not yet on GPU this will be taken care of here\n # we now return a cuda tensor! Not numpy array!\n\n x = to_cuda(maybe_to_torch(x), gpu_id=self.get_device())\n result_torch = torch.zeros([1, self.num_classes] + list(x.shape[2:]),\n dtype=torch.float).cuda(self.get_device(), non_blocking=True)\n\n if mult is not None:\n mult = to_cuda(maybe_to_torch(mult), gpu_id=self.get_device())\n\n if do_mirroring:\n mirror_idx = 8\n num_results = 2 ** len(mirror_axes)\n else:\n mirror_idx = 1\n num_results = 1\n\n for m in range(mirror_idx):\n if m == 0:\n pred = self.inference_apply_nonlin(self(x)) # self(x) - forward\n result_torch += 1 / num_results * pred\n\n if m == 1 and (2 in mirror_axes):\n pred = self.inference_apply_nonlin(self(torch.flip(x, (4, ))))\n result_torch += 1 / num_results * torch.flip(pred, (4,))\n\n if m == 2 and (1 in mirror_axes):\n pred = self.inference_apply_nonlin(self(torch.flip(x, (3, ))))\n result_torch += 1 / num_results * torch.flip(pred, (3,))\n\n if m == 3 and (2 in mirror_axes) and (1 in mirror_axes):\n pred = self.inference_apply_nonlin(self(torch.flip(x, (4, 3))))\n result_torch += 1 / num_results * torch.flip(pred, (4, 3))\n\n if m == 4 and (0 in mirror_axes):\n pred = self.inference_apply_nonlin(self(torch.flip(x, (2, ))))\n result_torch += 1 / num_results * torch.flip(pred, (2,))\n\n if m == 5 and (0 in mirror_axes) and (2 in mirror_axes):\n pred = self.inference_apply_nonlin(self(torch.flip(x, (4, 2))))\n result_torch += 1 / num_results * torch.flip(pred, (4, 2))\n\n if m == 6 and (0 in mirror_axes) and (1 in mirror_axes):\n pred = self.inference_apply_nonlin(self(torch.flip(x, (3, 2))))\n result_torch += 1 / num_results * torch.flip(pred, (3, 2))\n\n if m == 7 and (0 in mirror_axes) and (1 in mirror_axes) and (2 in mirror_axes):\n pred = self.inference_apply_nonlin(\n self(torch.flip(x, (4, 3, 2))))\n 
result_torch += 1 / num_results * torch.flip(pred, (4, 3, 2))\n\n if mult is not None:\n result_torch[:, :] *= mult\n\n return result_torch\n\n def _internal_maybe_mirror_and_pred_2D(self, x: Union[np.ndarray, torch.tensor], mirror_axes: tuple,\n do_mirroring: bool = True,\n mult: np.ndarray or torch.tensor = None) -> torch.tensor:\n # everything in here takes place on the GPU. If x and mult are not yet on GPU this will be taken care of here\n # we now return a cuda tensor! Not numpy array!\n assert len(x.shape) == 4, 'x must be (b, c, x, y)'\n\n x = to_cuda(maybe_to_torch(x), gpu_id=self.get_device())\n result_torch = torch.zeros([x.shape[0], self.num_classes] + list(x.shape[2:]),\n dtype=torch.float).cuda(self.get_device(), non_blocking=True)\n\n if mult is not None:\n mult = to_cuda(maybe_to_torch(mult), gpu_id=self.get_device())\n\n if do_mirroring:\n mirror_idx = 4\n num_results = 2 ** len(mirror_axes)\n else:\n mirror_idx = 1\n num_results = 1\n\n for m in range(mirror_idx):\n if m == 0:\n pred = self.inference_apply_nonlin(self(x))\n result_torch += 1 / num_results * pred\n\n if m == 1 and (1 in mirror_axes):\n pred = self.inference_apply_nonlin(self(torch.flip(x, (3, ))))\n result_torch += 1 / num_results * torch.flip(pred, (3, ))\n\n if m == 2 and (0 in mirror_axes):\n pred = self.inference_apply_nonlin(self(torch.flip(x, (2, ))))\n result_torch += 1 / num_results * torch.flip(pred, (2, ))\n\n if m == 3 and (0 in mirror_axes) and (1 in mirror_axes):\n pred = self.inference_apply_nonlin(self(torch.flip(x, (3, 2))))\n result_torch += 1 / num_results * torch.flip(pred, (3, 2))\n\n if mult is not None:\n result_torch[:, :] *= mult\n\n return result_torch\n\n def _internal_predict_2D_2Dconv_tiled(self, x: np.ndarray, step_size: float, do_mirroring: bool, mirror_axes: tuple,\n patch_size: tuple, regions_class_order: tuple, use_gaussian: bool,\n pad_border_mode: str, pad_kwargs: dict, all_in_gpu: bool,\n verbose: bool) -> Tuple[np.ndarray, np.ndarray]:\n # better safe than sorry\n assert len(x.shape) == 3, \"x must be (c, x, y)\"\n assert self.get_device() != \"cpu\"\n if verbose:\n print(\"step_size:\", step_size)\n if verbose:\n print(\"do mirror:\", do_mirroring)\n\n assert patch_size is not None, \"patch_size cannot be None for tiled prediction\"\n\n # for sliding window inference the image must at least be as large as the patch size. It does not matter\n # whether the shape is divisible by 2**num_pool as long as the patch size is\n data, slicer = pad_nd_image(\n x, patch_size, pad_border_mode, pad_kwargs, True, None)\n data_shape = data.shape # still c, x, y\n\n # compute the steps for sliding window\n steps = self._compute_steps_for_sliding_window(\n patch_size, data_shape[1:], step_size)\n num_tiles = len(steps[0]) * len(steps[1])\n\n if verbose:\n print(\"data shape:\", data_shape)\n print(\"patch size:\", patch_size)\n print(\"steps (x, y, and z):\", steps)\n print(\"number of tiles:\", num_tiles)\n\n # we only need to compute that once. It can take a while to compute this due to the large sigma in\n # gaussian_filter\n if use_gaussian and num_tiles > 1:\n if self._gaussian_2d is None or not all(\n [i == j for i, j in zip(patch_size, self._patch_size_for_gaussian_2d)]):\n if verbose:\n print('computing Gaussian')\n gaussian_importance_map = self._get_gaussian(\n patch_size, sigma_scale=1. 
/ 8)\n\n self._gaussian_2d = gaussian_importance_map\n self._patch_size_for_gaussian_2d = patch_size\n else:\n if verbose:\n print(\"using precomputed Gaussian\")\n gaussian_importance_map = self._gaussian_2d\n\n gaussian_importance_map = torch.from_numpy(gaussian_importance_map).cuda(self.get_device(),\n non_blocking=True)\n else:\n gaussian_importance_map = None\n\n if all_in_gpu:\n # If we run the inference in GPU only (meaning all tensors are allocated on the GPU, this reduces\n # CPU-GPU communication but required more GPU memory) we need to preallocate a few things on GPU\n\n if use_gaussian and num_tiles > 1:\n # half precision for the outputs should be good enough. If the outputs here are half, the\n # gaussian_importance_map should be as well\n gaussian_importance_map = gaussian_importance_map.half()\n\n # make sure we did not round anything to 0\n gaussian_importance_map[gaussian_importance_map == 0] = gaussian_importance_map[\n gaussian_importance_map != 0].min()\n\n add_for_nb_of_preds = gaussian_importance_map\n else:\n add_for_nb_of_preds = torch.ones(\n data.shape[1:], device=self.get_device())\n\n if verbose:\n print(\"initializing result array (on GPU)\")\n aggregated_results = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half,\n device=self.get_device())\n\n if verbose:\n print(\"moving data to GPU\")\n data = torch.from_numpy(data).cuda(\n self.get_device(), non_blocking=True)\n\n if verbose:\n print(\"initializing result_numsamples (on GPU)\")\n aggregated_nb_of_predictions = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half,\n device=self.get_device())\n else:\n if use_gaussian and num_tiles > 1:\n add_for_nb_of_preds = self._gaussian_2d\n else:\n add_for_nb_of_preds = np.ones(data.shape[1:], dtype=np.float32)\n aggregated_results = np.zeros(\n [self.num_classes] + list(data.shape[1:]), dtype=np.float32)\n aggregated_nb_of_predictions = np.zeros(\n [self.num_classes] + list(data.shape[1:]), dtype=np.float32)\n\n for x in steps[0]:\n lb_x = x\n ub_x = x + patch_size[0]\n for y in steps[1]:\n lb_y = y\n ub_y = y + patch_size[1]\n\n predicted_patch = self._internal_maybe_mirror_and_pred_2D(\n data[None, :, lb_x:ub_x, lb_y:ub_y], mirror_axes, do_mirroring,\n gaussian_importance_map)[0]\n\n if all_in_gpu:\n predicted_patch = predicted_patch.half()\n else:\n predicted_patch = predicted_patch.cpu().numpy()\n\n aggregated_results[:, lb_x:ub_x, lb_y:ub_y] += predicted_patch\n aggregated_nb_of_predictions[:, lb_x:ub_x,\n lb_y:ub_y] += add_for_nb_of_preds\n\n # we reverse the padding here (remeber that we padded the input to be at least as large as the patch size\n slicer = tuple(\n [slice(0, aggregated_results.shape[i]) for i in\n range(len(aggregated_results.shape) - (len(slicer) - 1))] + slicer[1:])\n aggregated_results = aggregated_results[slicer]\n aggregated_nb_of_predictions = aggregated_nb_of_predictions[slicer]\n\n # computing the class_probabilities by dividing the aggregated result with result_numsamples\n class_probabilities = aggregated_results / aggregated_nb_of_predictions\n\n if regions_class_order is None:\n predicted_segmentation = class_probabilities.argmax(0)\n else:\n if all_in_gpu:\n class_probabilities_here = class_probabilities.detach().cpu().numpy()\n else:\n class_probabilities_here = class_probabilities\n predicted_segmentation = np.zeros(\n class_probabilities_here.shape[1:], dtype=np.float32)\n for i, c in enumerate(regions_class_order):\n predicted_segmentation[class_probabilities_here[i] > 0.5] = c\n\n if 
all_in_gpu:\n if verbose:\n print(\"copying results to CPU\")\n\n if regions_class_order is None:\n predicted_segmentation = predicted_segmentation.detach().cpu().numpy()\n\n class_probabilities = class_probabilities.detach().cpu().numpy()\n\n if verbose:\n print(\"prediction done\")\n return predicted_segmentation, class_probabilities\n\n def _internal_predict_3D_2Dconv(self, x: np.ndarray, min_size: Tuple[int, int], do_mirroring: bool,\n mirror_axes: tuple = (0, 1), regions_class_order: tuple = None,\n pad_border_mode: str = \"constant\", pad_kwargs: dict = None,\n all_in_gpu: bool = False, verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:\n if all_in_gpu:\n raise NotImplementedError\n assert len(x.shape) == 4, \"data must be c, x, y, z\"\n predicted_segmentation = []\n softmax_pred = []\n for s in range(x.shape[1]):\n pred_seg, softmax_pres = self._internal_predict_2D_2Dconv(\n x[:, s], min_size, do_mirroring, mirror_axes, regions_class_order, pad_border_mode, pad_kwargs, verbose)\n predicted_segmentation.append(pred_seg[None])\n softmax_pred.append(softmax_pres[None])\n predicted_segmentation = np.vstack(predicted_segmentation)\n softmax_pred = np.vstack(softmax_pred).transpose((1, 0, 2, 3))\n return predicted_segmentation, softmax_pred\n\n def predict_3D_pseudo3D_2Dconv(self, x: np.ndarray, min_size: Tuple[int, int], do_mirroring: bool,\n mirror_axes: tuple = (0, 1), regions_class_order: tuple = None,\n pseudo3D_slices: int = 5, all_in_gpu: bool = False,\n pad_border_mode: str = \"constant\", pad_kwargs: dict = None,\n verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:\n if all_in_gpu:\n raise NotImplementedError\n assert len(x.shape) == 4, \"data must be c, x, y, z\"\n assert pseudo3D_slices % 2 == 1, \"pseudo3D_slices must be odd\"\n extra_slices = (pseudo3D_slices - 1) // 2\n\n shp_for_pad = np.array(x.shape)\n shp_for_pad[1] = extra_slices\n\n pad = np.zeros(shp_for_pad, dtype=np.float32)\n data = np.concatenate((pad, x, pad), 1)\n\n predicted_segmentation = []\n softmax_pred = []\n for s in range(extra_slices, data.shape[1] - extra_slices):\n d = data[:, (s - extra_slices):(s + extra_slices + 1)]\n d = d.reshape((-1, d.shape[-2], d.shape[-1]))\n pred_seg, softmax_pres = \\\n self._internal_predict_2D_2Dconv(d, min_size, do_mirroring, mirror_axes,\n regions_class_order, pad_border_mode, pad_kwargs, verbose)\n predicted_segmentation.append(pred_seg[None])\n softmax_pred.append(softmax_pres[None])\n predicted_segmentation = np.vstack(predicted_segmentation)\n softmax_pred = np.vstack(softmax_pred).transpose((1, 0, 2, 3))\n\n return predicted_segmentation, softmax_pred\n\n def _internal_predict_3D_2Dconv_tiled(self, x: np.ndarray, patch_size: Tuple[int, int], do_mirroring: bool,\n mirror_axes: tuple = (0, 1), step_size: float = 0.5,\n regions_class_order: tuple = None, use_gaussian: bool = False,\n pad_border_mode: str = \"edge\", pad_kwargs: dict = None,\n all_in_gpu: bool = False,\n verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:\n if all_in_gpu:\n raise NotImplementedError\n\n assert len(x.shape) == 4, \"data must be c, x, y, z\"\n\n predicted_segmentation = []\n softmax_pred = []\n\n for s in range(x.shape[1]):\n pred_seg, softmax_pres = self._internal_predict_2D_2Dconv_tiled(\n x[:, s], step_size, do_mirroring, mirror_axes, patch_size, regions_class_order, use_gaussian,\n pad_border_mode, pad_kwargs, all_in_gpu, verbose)\n\n predicted_segmentation.append(pred_seg[None])\n softmax_pred.append(softmax_pres[None])\n\n predicted_segmentation = 
np.vstack(predicted_segmentation)\n softmax_pred = np.vstack(softmax_pred).transpose((1, 0, 2, 3))\n\n return predicted_segmentation, softmax_pred" }, { "identifier": "ModelLossSemsegGatedCRF", "path": "nn_transunet/trainer/loss_functions.py", "snippet": "class ModelLossSemsegGatedCRF(torch.nn.Module):\n \"\"\"\n This module provides an implementation of the Gated CRF Loss for Weakly Supervised Semantic Image Segmentation.\n This loss function promotes consistent label assignment guided by input features, such as RGBXY.\n Please consider using the following bibtex for citation:\n @article{obukhov2019gated,\n author={Anton Obukhov and Stamatios Georgoulis and Dengxin Dai and Luc {Van Gool}},\n title={Gated {CRF} Loss for Weakly Supervised Semantic Image Segmentation},\n journal={CoRR},\n volume={abs/1906.04651},\n year={2019},\n url={http://arxiv.org/abs/1906.04651},\n }\n \"\"\"\n\n def forward(\n self, y_hat_softmax, kernels_desc, kernels_radius, sample, height_input, width_input,\n mask_src=None, mask_dst=None, compatibility=None, custom_modality_downsamplers=None, out_kernels_vis=False\n ):\n \"\"\"\n Performs the forward pass of the loss.\n :param y_hat_softmax: A tensor of predicted per-pixel class probabilities of size NxCxHxW\n :param kernels_desc: A list of dictionaries, each describing one Gaussian kernel composition from modalities.\n The final kernel is a weighted sum of individual kernels. Following example is a composition of\n RGBXY and XY kernels:\n kernels_desc: [{\n 'weight': 0.9, # Weight of RGBXY kernel\n 'xy': 6, # Sigma for XY\n 'rgb': 0.1, # Sigma for RGB\n },{\n 'weight': 0.1, # Weight of XY kernel\n 'xy': 6, # Sigma for XY\n }]\n :param kernels_radius: Defines size of bounding box region around each pixel in which the kernel is constructed.\n :param sample: A dictionary with modalities (except 'xy') used in kernels_desc parameter. Each of the provided\n modalities is allowed to be larger than the shape of y_hat_softmax, in such case downsampling will be\n invoked. 
Default downsampling method is area resize; this can be overriden by setting.\n custom_modality_downsamplers parameter.\n :param width_input, height_input: Dimensions of the full scale resolution of modalities\n :param mask_src: (optional) Source mask.\n :param mask_dst: (optional) Destination mask.\n :param compatibility: (optional) Classes compatibility matrix, defaults to Potts model.\n :param custom_modality_downsamplers: A dictionary of modality downsampling functions.\n :param out_kernels_vis: Whether to return a tensor with kernels visualized with some step.\n :return: Loss function value.\n \"\"\"\n assert y_hat_softmax.dim() == 4, 'Prediction must be a NCHW batch'\n N, C, height_pred, width_pred = y_hat_softmax.shape\n device = y_hat_softmax.device\n\n assert width_input % width_pred == 0 and height_input % height_pred == 0 and \\\n width_input * height_pred == height_input * width_pred, \\\n f'[{width_input}x{height_input}] !~= [{width_pred}x{height_pred}]'\n\n kernels = self._create_kernels(\n kernels_desc, kernels_radius, sample, N, height_pred, width_pred, device, custom_modality_downsamplers\n )\n\n denom = N * height_pred * width_pred\n\n def resize_fix_mask(mask, name):\n assert mask.dim() == 4 and mask.shape[:2] == (N, 1) and mask.dtype == torch.float32, \\\n f'{name} mask must be a NCHW batch with C=1 and dtype float32'\n if mask.shape[2:] != (height_pred, width_pred):\n mask = ModelLossSemsegGatedCRF._downsample(\n mask, 'mask', height_pred, width_pred, custom_modality_downsamplers\n )\n mask[mask != mask] = 0.0 # handle NaN\n # handle edges of mask after interpolation\n mask[mask < 1.0] = 0.0\n return mask\n\n if mask_src is not None:\n mask_src = resize_fix_mask(mask_src, 'Source')\n denom = mask_src.sum().clamp(min=1)\n mask_src = self._unfold(mask_src, kernels_radius)\n kernels = kernels * mask_src\n\n if mask_dst is not None:\n mask_dst = resize_fix_mask(mask_dst, 'Destination')\n denom = mask_dst.sum().clamp(min=1)\n mask_dst = mask_dst.view(N, 1, 1, 1, height_pred, width_pred)\n kernels = kernels * mask_dst\n\n y_hat_unfolded = self._unfold(y_hat_softmax, kernels_radius)\n\n product_kernel_x_y_hat = (kernels * y_hat_unfolded) \\\n .view(N, C, (kernels_radius * 2 + 1) ** 2, height_pred, width_pred) \\\n .sum(dim=2, keepdim=False)\n\n if compatibility is None:\n # Using shortcut for Pott's class compatibility model\n loss = -(product_kernel_x_y_hat * y_hat_softmax).sum()\n # comment out to save computation, total loss may go below 0\n loss = kernels.sum() + loss\n else:\n assert compatibility.shape == (\n C, C), f'Compatibility matrix expected shape [{C}x{C}]'\n assert (compatibility < 0).int().sum(\n ) == 0, 'Compatibility matrix must not have negative values'\n assert compatibility.diag.sum() == 0, 'Compatibility matrix diagonal must be 0'\n compat = (C - 1) * \\\n F.normalize(compatibility.float().to(device), p=1, dim=1)\n y_hat_CxNHW = y_hat_softmax.permute(\n 1, 0, 2, 3).contiguous().view(C, -1)\n product_kernel_x_y_hat_NHWxC = product_kernel_x_y_hat.permute(\n 0, 2, 3, 1).contiguous().view(-1, C)\n product_CxC = torch.mm(y_hat_CxNHW, product_kernel_x_y_hat_NHWxC)\n loss = (compat * product_CxC).sum()\n del product_CxC\n\n out = {\n 'loss': loss / denom,\n }\n\n if out_kernels_vis:\n out['kernels_vis'] = self._visualize_kernels(\n kernels, kernels_radius, height_input, width_input, height_pred, width_pred\n )\n\n return out\n\n @staticmethod\n def _downsample(img, modality, height_dst, width_dst, custom_modality_downsamplers):\n if custom_modality_downsamplers 
is not None and modality in custom_modality_downsamplers:\n f_down = custom_modality_downsamplers[modality]\n else:\n f_down = F.adaptive_avg_pool2d\n return f_down(img, (height_dst, width_dst))\n\n @staticmethod\n def _create_kernels(\n kernels_desc, kernels_radius, sample, N, height_pred, width_pred, device, custom_modality_downsamplers\n ):\n kernels = None\n for i, desc in enumerate(kernels_desc):\n weight = desc['weight']\n features = []\n for modality, sigma in desc.items():\n if modality == 'weight':\n continue\n if modality == 'xy':\n feature = ModelLossSemsegGatedCRF._get_mesh(\n N, height_pred, width_pred, device)\n else:\n # assert modality in sample, 'Modality {} is listed in {}-th kernel descriptor, but not present in the sample'.format(modality, i)\n feature = sample\n feature = ModelLossSemsegGatedCRF._downsample(\n feature, modality, height_pred, width_pred, custom_modality_downsamplers\n )\n feature /= sigma\n features.append(feature)\n features = torch.cat(features, dim=1)\n kernel = weight * \\\n ModelLossSemsegGatedCRF._create_kernels_from_features(\n features, kernels_radius)\n kernels = kernel if kernels is None else kernel + kernels\n return kernels\n\n @staticmethod\n def _create_kernels_from_features(features, radius):\n assert features.dim() == 4, 'Features must be a NCHW batch'\n N, C, H, W = features.shape\n kernels = ModelLossSemsegGatedCRF._unfold(features, radius)\n kernels = kernels - kernels[:, :, radius,\n radius, :, :].view(N, C, 1, 1, H, W)\n kernels = (-0.5 * kernels ** 2).sum(dim=1, keepdim=True).exp()\n kernels[:, :, radius, radius, :, :] = 0\n return kernels\n\n @staticmethod\n def _get_mesh(N, H, W, device):\n return torch.cat((\n torch.arange(0, W, 1, dtype=torch.float32, device=device).view(\n 1, 1, 1, W).repeat(N, 1, H, 1),\n torch.arange(0, H, 1, dtype=torch.float32, device=device).view(\n 1, 1, H, 1).repeat(N, 1, 1, W)\n ), 1)\n\n @staticmethod\n def _unfold(img, radius):\n assert img.dim() == 4, 'Unfolding requires NCHW batch'\n N, C, H, W = img.shape\n diameter = 2 * radius + 1\n return F.unfold(img, diameter, 1, radius).view(N, C, diameter, diameter, H, W)\n\n @staticmethod\n def _visualize_kernels(kernels, radius, height_input, width_input, height_pred, width_pred):\n diameter = 2 * radius + 1\n vis = kernels[:, :, :, :, radius::diameter, radius::diameter]\n vis_nh, vis_nw = vis.shape[-2:]\n vis = vis.permute(0, 1, 4, 2, 5, 3).contiguous().view(\n kernels.shape[0], 1, diameter * vis_nh, diameter * vis_nw)\n if vis.shape[2] > height_pred:\n vis = vis[:, :, :height_pred, :]\n if vis.shape[3] > width_pred:\n vis = vis[:, :, :, :width_pred]\n if vis.shape[2:] != (height_pred, width_pred):\n vis = F.pad(vis, [0, width_pred - vis.shape[3],\n 0, height_pred - vis.shape[2]])\n vis = F.interpolate(vis, (height_input, width_input), mode='nearest')\n return vis" } ]
from _warnings import warn from typing import Tuple from batchgenerators.utilities.file_and_folder_operations import * from nn_transunet.networks.neural_network import SegmentationNetwork from sklearn.model_selection import KFold from torch import nn from torch.cuda.amp import GradScaler, autocast from torch.optim.lr_scheduler import _LRScheduler from ..trainer.loss_functions import ModelLossSemsegGatedCRF from time import time, sleep from collections import OrderedDict from abc import abstractmethod from datetime import datetime from tqdm import trange from ..utils.dist_utils import check_call_hdfs_command, mkdir_hdfs import matplotlib import numpy as np import matplotlib.pyplot as plt import sys import torch.backends.cudnn as cudnn import torch import math import matplotlib.pyplot as plt
13,685
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. matplotlib.use("agg") def maybe_to_torch(d): if isinstance(d, list): d = [maybe_to_torch(i) if not isinstance(i, torch.Tensor) else i for i in d] elif not isinstance(d, torch.Tensor): d = torch.from_numpy(d).float() return d def poly_lr(epoch, max_epochs, initial_lr, exponent=0.9): return initial_lr * (1 - epoch / max_epochs) ** exponent def warmup_poly_lr(epoch, max_epochs, warmup_epochs, initial_lr, exponent=0.9): if epoch < warmup_epochs: return initial_lr * (float(epoch) / float(max(1.0, warmup_epochs))) epoch_rel = epoch - warmup_epochs max_epochs_rel = max_epochs - warmup_epochs return initial_lr * (1 - epoch_rel / max_epochs_rel) ** exponent def to_cuda(data, non_blocking=True, gpu_id=0): if isinstance(data, list): data = [i.cuda(gpu_id, non_blocking=non_blocking) for i in data] else: data = data.cuda(gpu_id, non_blocking=non_blocking) return data class NetworkTrainer(object): def __init__(self, deterministic=True, fp16=False): """ A generic class that can train almost any neural network (RNNs excluded). It provides basic functionality such as the training loop, tracking of training and validation losses (and the target metric if you implement it) Training can be terminated early if the validation loss (or the target metric if implemented) do not improve anymore. This is based on a moving average (MA) of the loss/metric instead of the raw values to get more smooth results. What you need to override: - __init__ - initialize - run_online_evaluation (optional) - finish_online_evaluation (optional) - validate - predict_test_case """ self.fp16 = fp16 self.amp_grad_scaler = None if deterministic: np.random.seed(12345) torch.manual_seed(12345) if torch.cuda.is_available(): torch.cuda.manual_seed_all(12345) cudnn.deterministic = True torch.backends.cudnn.benchmark = False else: cudnn.deterministic = False torch.backends.cudnn.benchmark = True ################# SET THESE IN self.initialize() ###################################
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. matplotlib.use("agg") def maybe_to_torch(d): if isinstance(d, list): d = [maybe_to_torch(i) if not isinstance(i, torch.Tensor) else i for i in d] elif not isinstance(d, torch.Tensor): d = torch.from_numpy(d).float() return d def poly_lr(epoch, max_epochs, initial_lr, exponent=0.9): return initial_lr * (1 - epoch / max_epochs) ** exponent def warmup_poly_lr(epoch, max_epochs, warmup_epochs, initial_lr, exponent=0.9): if epoch < warmup_epochs: return initial_lr * (float(epoch) / float(max(1.0, warmup_epochs))) epoch_rel = epoch - warmup_epochs max_epochs_rel = max_epochs - warmup_epochs return initial_lr * (1 - epoch_rel / max_epochs_rel) ** exponent def to_cuda(data, non_blocking=True, gpu_id=0): if isinstance(data, list): data = [i.cuda(gpu_id, non_blocking=non_blocking) for i in data] else: data = data.cuda(gpu_id, non_blocking=non_blocking) return data class NetworkTrainer(object): def __init__(self, deterministic=True, fp16=False): """ A generic class that can train almost any neural network (RNNs excluded). It provides basic functionality such as the training loop, tracking of training and validation losses (and the target metric if you implement it) Training can be terminated early if the validation loss (or the target metric if implemented) do not improve anymore. This is based on a moving average (MA) of the loss/metric instead of the raw values to get more smooth results. What you need to override: - __init__ - initialize - run_online_evaluation (optional) - finish_online_evaluation (optional) - validate - predict_test_case """ self.fp16 = fp16 self.amp_grad_scaler = None if deterministic: np.random.seed(12345) torch.manual_seed(12345) if torch.cuda.is_available(): torch.cuda.manual_seed_all(12345) cudnn.deterministic = True torch.backends.cudnn.benchmark = False else: cudnn.deterministic = False torch.backends.cudnn.benchmark = True ################# SET THESE IN self.initialize() ###################################
self.network: Tuple[SegmentationNetwork, nn.DataParallel] = None
0
2023-10-11 05:19:25+00:00
16k
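The tiled prediction code quoted in this record spaces patches at most patch_size * step_size apart and then distributes them evenly so the last patch ends flush with the image border. The sketch below is a one-axis re-derivation of that step computation (same arithmetic as _compute_steps_for_sliding_window, reduced to a single dimension and written independently of the repository); it reproduces the worked example from the code comment: image size 110, patch size 64 and step_size 0.5 give starts 0, 23 and 46.

import numpy as np

def sliding_window_steps(image_size: int, patch_size: int, step_size: float) -> list:
    # One-dimensional version of the tiling logic quoted above; not part of the repository.
    assert image_size >= patch_size and 0 < step_size <= 1
    target_step = patch_size * step_size                     # desired spacing in voxels
    num_steps = int(np.ceil((image_size - patch_size) / target_step)) + 1
    max_start = image_size - patch_size                      # last valid patch start
    actual_step = max_start / (num_steps - 1) if num_steps > 1 else 0
    return [int(np.round(actual_step * i)) for i in range(num_steps)]

print(sliding_window_steps(110, 64, 0.5))  # -> [0, 23, 46]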
AMAAI-Lab/Video2Music
train.py
[ { "identifier": "compute_vevo_accuracy", "path": "dataset/vevo_dataset.py", "snippet": "def compute_vevo_accuracy(out, tgt):\n softmax = nn.Softmax(dim=-1)\n out = torch.argmax(softmax(out), dim=-1)\n\n out = out.flatten()\n tgt = tgt.flatten()\n\n mask = (tgt != CHORD_PAD)\n\n out = out[mask]\n tgt = tgt[mask]\n\n if(len(tgt) == 0):\n return 1.0\n\n num_right = (out == tgt)\n num_right = torch.sum(num_right).type(TORCH_FLOAT)\n\n acc = num_right / len(tgt)\n\n return acc" }, { "identifier": "create_vevo_datasets", "path": "dataset/vevo_dataset.py", "snippet": "def create_vevo_datasets(dataset_root = \"./dataset\", max_seq_chord=300, max_seq_video=300, vis_models=\"2d/clip_l14p\", emo_model=\"6c_l14p\", split_ver=\"v1\", random_seq=True, is_video=True):\n\n train_dataset = VevoDataset(\n dataset_root = dataset_root, split=\"train\", split_ver=split_ver, \n vis_models=vis_models, emo_model =emo_model, max_seq_chord=max_seq_chord, max_seq_video=max_seq_video, \n random_seq=random_seq, is_video = is_video )\n \n val_dataset = VevoDataset(\n dataset_root = dataset_root, split=\"val\", split_ver=split_ver, \n vis_models=vis_models, emo_model =emo_model, max_seq_chord=max_seq_chord, max_seq_video=max_seq_video, \n random_seq=random_seq, is_video = is_video )\n \n test_dataset = VevoDataset(\n dataset_root = dataset_root, split=\"test\", split_ver=split_ver, \n vis_models=vis_models, emo_model =emo_model, max_seq_chord=max_seq_chord, max_seq_video=max_seq_video, \n random_seq=random_seq, is_video = is_video )\n \n return train_dataset, val_dataset, test_dataset" }, { "identifier": "MusicTransformer", "path": "model/music_transformer.py", "snippet": "class MusicTransformer(nn.Module):\n def __init__(self, n_layers=6, num_heads=8, d_model=512, dim_feedforward=1024,\n dropout=0.1, max_sequence_midi=2048, max_sequence_chord=300, rpr=False):\n super(MusicTransformer, self).__init__()\n\n self.dummy = DummyDecoder()\n self.nlayers = n_layers\n self.nhead = num_heads\n self.d_model = d_model\n self.d_ff = dim_feedforward\n self.dropout = dropout\n self.max_seq_midi = max_sequence_midi\n self.max_seq_chord = max_sequence_chord\n self.rpr = rpr\n\n # Input embedding for video and music features\n self.embedding = nn.Embedding(CHORD_SIZE, self.d_model)\n\n # self.embedding_key = nn.Embedding(1, self.d_model)\n self.embedding_root = nn.Embedding(CHORD_ROOT_SIZE, self.d_model)\n self.embedding_attr = nn.Embedding(CHORD_ATTR_SIZE, self.d_model)\n\n self.positional_encoding = PositionalEncoding(self.d_model, self.dropout, self.max_seq_chord)\n self.Linear_chord = nn.Linear(self.d_model+1, self.d_model)\n\n # Base transformer\n if(not self.rpr):\n self.transformer = nn.Transformer(\n d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,\n num_decoder_layers=0, dropout=self.dropout, # activation=self.ff_activ,\n dim_feedforward=self.d_ff, custom_decoder=self.dummy\n )\n # RPR Transformer\n else:\n encoder_norm = LayerNorm(self.d_model)\n encoder_layer = TransformerEncoderLayerRPR(self.d_model, self.nhead, self.d_ff, self.dropout, er_len=self.max_seq_chord)\n\n encoder = TransformerEncoderRPR(encoder_layer, self.nlayers, encoder_norm)\n self.transformer = nn.Transformer(\n d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,\n num_decoder_layers=0, dropout=self.dropout, # activation=self.ff_activ,\n dim_feedforward=self.d_ff, custom_decoder=self.dummy, custom_encoder=encoder\n )\n # Final output is a softmaxed linear layer\n self.Wout = nn.Linear(self.d_model, 
CHORD_SIZE)\n self.Wout_root = nn.Linear(self.d_model, CHORD_ROOT_SIZE)\n self.Wout_attr = nn.Linear(self.d_model, CHORD_ATTR_SIZE)\n self.softmax = nn.Softmax(dim=-1)\n\n # forward\n def forward(self, x, x_root, x_attr, feature_key, mask=True):\n if(mask is True):\n mask = self.transformer.generate_square_subsequent_mask(x.shape[1]).to(get_device())\n else:\n mask = None\n\n ### Chord + Key (DECODER) ###\n # x = self.embedding(x)\n \n x_root = self.embedding_root(x_root)\n x_attr = self.embedding_attr(x_attr)\n x = x_root + x_attr\n\n feature_key_padded = torch.full((x.shape[0], x.shape[1], 1), feature_key.item())\n feature_key_padded = feature_key_padded.to(get_device())\n x = torch.cat([x, feature_key_padded], dim=-1)\n xf = self.Linear_chord(x)\n\n ### POSITIONAL ENCODING ###\n xf = xf.permute(1,0,2) # -> (max_seq-1, batch_size, d_model)\n xf = self.positional_encoding(xf)\n \n ### TRANSFORMER ###\n x_out = self.transformer(src=xf, tgt=xf, tgt_mask=mask)\n x_out = x_out.permute(1,0,2)\n \n if IS_SEPERATED:\n y_root = self.Wout_root(x_out)\n y_attr = self.Wout_attr(x_out)\n del mask\n return y_root, y_attr\n else:\n y = self.Wout(x_out)\n del mask\n return y\n\n # generate\n def generate(self, feature_key=None, primer=None, primer_root=None, primer_attr=None, target_seq_length=300, beam=0, beam_chance=1.0):\n assert (not self.training), \"Cannot generate while in training mode\"\n\n with open('dataset/vevo_meta/chord_inv.json') as json_file:\n chordInvDic = json.load(json_file)\n with open('dataset/vevo_meta/chord_root.json') as json_file:\n chordRootDic = json.load(json_file)\n with open('dataset/vevo_meta/chord_attr.json') as json_file:\n chordAttrDic = json.load(json_file)\n\n print(\"Generating sequence of max length:\", target_seq_length)\n gen_seq = torch.full((1,target_seq_length), CHORD_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n gen_seq_root = torch.full((1,target_seq_length), CHORD_ROOT_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n gen_seq_attr = torch.full((1,target_seq_length), CHORD_ATTR_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n \n num_primer = len(primer)\n\n gen_seq[..., :num_primer] = primer.type(TORCH_LABEL_TYPE).to(get_device())\n gen_seq_root[..., :num_primer] = primer_root.type(TORCH_LABEL_TYPE).to(get_device())\n \n gen_seq_attr[..., :num_primer] = primer_attr.type(TORCH_LABEL_TYPE).to(get_device())\n\n cur_i = num_primer\n while(cur_i < target_seq_length):\n # gen_seq_batch = gen_seq.clone()\n # y = self.softmax(self.forward(gen_seq[..., :cur_i]))[..., :CHORD_END]\n y = self.softmax( self.forward( gen_seq[..., :cur_i], gen_seq_root[..., :cur_i], gen_seq_attr[..., :cur_i], feature_key) )[..., :CHORD_END]\n \n token_probs = y[:, cur_i-1, :]\n if(beam == 0):\n beam_ran = 2.0\n else:\n beam_ran = random.uniform(0,1)\n if(beam_ran <= beam_chance):\n token_probs = token_probs.flatten()\n top_res, top_i = torch.topk(token_probs, beam)\n beam_rows = top_i // CHORD_SIZE\n beam_cols = top_i % CHORD_SIZE\n gen_seq = gen_seq[beam_rows, :]\n gen_seq[..., cur_i] = beam_cols\n else:\n distrib = torch.distributions.categorical.Categorical(probs=token_probs)\n next_token = distrib.sample()\n #print(\"next token:\",next_token)\n gen_seq[:, cur_i] = next_token\n gen_chord = chordInvDic[ str( next_token.item() ) ]\n \n chord_arr = gen_chord.split(\":\")\n if len(chord_arr) == 1:\n chordRootID = chordRootDic[chord_arr[0]]\n chordAttrID = 1\n chordRootID = torch.tensor([chordRootID]).to(get_device())\n chordAttrID = torch.tensor([chordAttrID]).to(get_device())\n 
gen_seq_root[:, cur_i] = chordRootID\n gen_seq_attr[:, cur_i] = chordAttrID\n elif len(chord_arr) == 2:\n chordRootID = chordRootDic[chord_arr[0]]\n chordAttrID = chordAttrDic[chord_arr[1]]\n chordRootID = torch.tensor([chordRootID]).to(get_device())\n chordAttrID = torch.tensor([chordAttrID]).to(get_device())\n gen_seq_root[:, cur_i] = chordRootID\n gen_seq_attr[:, cur_i] = chordAttrID\n \n # Let the transformer decide to end if it wants to\n if(next_token == CHORD_END):\n print(\"Model called end of sequence at:\", cur_i, \"/\", target_seq_length)\n break\n \n cur_i += 1\n if(cur_i % 50 == 0):\n print(cur_i, \"/\", target_seq_length)\n return gen_seq[:, :cur_i]" }, { "identifier": "VideoMusicTransformer", "path": "model/video_music_transformer.py", "snippet": "class VideoMusicTransformer(nn.Module):\n def __init__(self, n_layers=6, num_heads=8, d_model=512, dim_feedforward=1024,\n dropout=0.1, max_sequence_midi =2048, max_sequence_video=300, max_sequence_chord=300, total_vf_dim = 0, rpr=False):\n super(VideoMusicTransformer, self).__init__()\n self.nlayers = n_layers\n self.nhead = num_heads\n self.d_model = d_model\n self.d_ff = dim_feedforward\n self.dropout = dropout\n self.max_seq_midi = max_sequence_midi\n self.max_seq_video = max_sequence_video\n self.max_seq_chord = max_sequence_chord\n self.rpr = rpr\n\n # Input embedding for video and music features\n self.embedding = nn.Embedding(CHORD_SIZE, self.d_model)\n self.embedding_root = nn.Embedding(CHORD_ROOT_SIZE, self.d_model)\n self.embedding_attr = nn.Embedding(CHORD_ATTR_SIZE, self.d_model)\n \n self.total_vf_dim = total_vf_dim\n self.Linear_vis = nn.Linear(self.total_vf_dim, self.d_model)\n self.Linear_chord = nn.Linear(self.d_model+1, self.d_model)\n \n # Positional encoding\n self.positional_encoding = PositionalEncoding(self.d_model, self.dropout, self.max_seq_chord)\n self.positional_encoding_video = PositionalEncoding(self.d_model, self.dropout, self.max_seq_video)\n\n # Add condition (minor or major)\n self.condition_linear = nn.Linear(1, self.d_model)\n \n # Base transformer\n if(not self.rpr):\n self.transformer = nn.Transformer(\n d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,\n num_decoder_layers=self.nlayers, dropout=self.dropout, # activation=self.ff_activ,\n dim_feedforward=self.d_ff\n )\n # RPR Transformer\n else:\n decoder_norm = LayerNorm(self.d_model)\n decoder_layer = TransformerDecoderLayerRPR(self.d_model, self.nhead, self.d_ff, self.dropout, er_len=self.max_seq_chord)\n decoder = TransformerDecoderRPR(decoder_layer, self.nlayers, decoder_norm)\n self.transformer = nn.Transformer(\n d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,\n num_decoder_layers=self.nlayers, dropout=self.dropout, # activation=self.ff_activ,\n dim_feedforward=self.d_ff, custom_decoder=decoder\n ) \n \n self.Wout = nn.Linear(self.d_model, CHORD_SIZE)\n self.Wout_root = nn.Linear(self.d_model, CHORD_ROOT_SIZE)\n self.Wout_attr = nn.Linear(self.d_model, CHORD_ATTR_SIZE)\n self.softmax = nn.Softmax(dim=-1)\n \n def forward(self, x, x_root, x_attr, feature_semantic_list, feature_key, feature_scene_offset, feature_motion, feature_emotion, mask=True):\n if(mask is True):\n mask = self.transformer.generate_square_subsequent_mask(x.shape[1]).to(get_device())\n else:\n mask = None\n \n x_root = self.embedding_root(x_root)\n x_attr = self.embedding_attr(x_attr)\n x = x_root + x_attr\n\n feature_key_padded = torch.full((x.shape[0], x.shape[1], 1), feature_key.item())\n feature_key_padded = 
feature_key_padded.to(get_device())\n x = torch.cat([x, feature_key_padded], dim=-1)\n\n xf = self.Linear_chord(x)\n\n ### Video (SemanticList + SceneOffset + Motion + Emotion) (ENCODER) ###\n vf_concat = feature_semantic_list[0].float()\n\n for i in range(1, len(feature_semantic_list)):\n vf_concat = torch.cat( (vf_concat, feature_semantic_list[i].float()), dim=2) \n \n vf_concat = torch.cat([vf_concat, feature_scene_offset.unsqueeze(-1).float()], dim=-1) # -> (max_seq_video, batch_size, d_model+1)\n vf_concat = torch.cat([vf_concat, feature_motion.unsqueeze(-1).float()], dim=-1) # -> (max_seq_video, batch_size, d_model+1)\n vf_concat = torch.cat([vf_concat, feature_emotion.float()], dim=-1) # -> (max_seq_video, batch_size, d_model+1)\n vf = self.Linear_vis(vf_concat)\n \n ### POSITIONAL ENCODING ###\n xf = xf.permute(1,0,2) # -> (max_seq-1, batch_size, d_model)\n vf = vf.permute(1,0,2) # -> (max_seq_video, batch_size, d_model)\n xf = self.positional_encoding(xf)\n vf = self.positional_encoding_video(vf)\n\n ### TRANSFORMER ###\n x_out = self.transformer(src=vf, tgt=xf, tgt_mask=mask)\n x_out = x_out.permute(1,0,2)\n\n if IS_SEPERATED:\n y_root = self.Wout_root(x_out)\n y_attr = self.Wout_attr(x_out)\n del mask\n return y_root, y_attr\n else:\n y = self.Wout(x_out)\n del mask\n return y\n \n def generate(self, feature_semantic_list = [], feature_key=None, feature_scene_offset=None, feature_motion=None, feature_emotion=None,\n primer=None, primer_root=None, primer_attr=None, target_seq_length=300, beam=0, beam_chance=1.0, max_conseq_N = 0, max_conseq_chord = 2):\n \n assert (not self.training), \"Cannot generate while in training mode\"\n print(\"Generating sequence of max length:\", target_seq_length)\n\n with open('dataset/vevo_meta/chord_inv.json') as json_file:\n chordInvDic = json.load(json_file)\n with open('dataset/vevo_meta/chord_root.json') as json_file:\n chordRootDic = json.load(json_file)\n with open('dataset/vevo_meta/chord_attr.json') as json_file:\n chordAttrDic = json.load(json_file)\n\n gen_seq = torch.full((1,target_seq_length), CHORD_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n gen_seq_root = torch.full((1,target_seq_length), CHORD_ROOT_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n gen_seq_attr = torch.full((1,target_seq_length), CHORD_ATTR_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n \n num_primer = len(primer)\n gen_seq[..., :num_primer] = primer.type(TORCH_LABEL_TYPE).to(get_device())\n gen_seq_root[..., :num_primer] = primer_root.type(TORCH_LABEL_TYPE).to(get_device())\n gen_seq_attr[..., :num_primer] = primer_attr.type(TORCH_LABEL_TYPE).to(get_device())\n\n cur_i = num_primer\n while(cur_i < target_seq_length):\n y = self.softmax( self.forward( gen_seq[..., :cur_i], gen_seq_root[..., :cur_i], gen_seq_attr[..., :cur_i], \n feature_semantic_list, feature_key, feature_scene_offset, feature_motion, feature_emotion) )[..., :CHORD_END]\n \n token_probs = y[:, cur_i-1, :]\n if(beam == 0):\n beam_ran = 2.0\n else:\n beam_ran = random.uniform(0,1)\n if(beam_ran <= beam_chance):\n token_probs = token_probs.flatten()\n top_res, top_i = torch.topk(token_probs, beam)\n beam_rows = top_i // CHORD_SIZE\n beam_cols = top_i % CHORD_SIZE\n gen_seq = gen_seq[beam_rows, :]\n gen_seq[..., cur_i] = beam_cols\n else:\n # token_probs.shape : [1, 157] \n # 0: N, 1: C, ... 
, 156: B:maj7\n # 157 chordEnd 158 padding\n if max_conseq_N == 0:\n token_probs[0][0] = 0.0\n isMaxChord = True\n if cur_i >= max_conseq_chord :\n preChord = gen_seq[0][cur_i-1].item() \n for k in range (1, max_conseq_chord):\n if preChord != gen_seq[0][cur_i-1-k].item():\n isMaxChord = False\n else:\n isMaxChord = False\n \n if isMaxChord:\n preChord = gen_seq[0][cur_i-1].item()\n token_probs[0][preChord] = 0.0\n \n distrib = torch.distributions.categorical.Categorical(probs=token_probs)\n next_token = distrib.sample()\n gen_seq[:, cur_i] = next_token\n gen_chord = chordInvDic[ str( next_token.item() ) ]\n \n chord_arr = gen_chord.split(\":\")\n if len(chord_arr) == 1:\n chordRootID = chordRootDic[chord_arr[0]]\n chordAttrID = 1\n chordRootID = torch.tensor([chordRootID]).to(get_device())\n chordAttrID = torch.tensor([chordAttrID]).to(get_device())\n gen_seq_root[:, cur_i] = chordRootID\n gen_seq_attr[:, cur_i] = chordAttrID\n elif len(chord_arr) == 2:\n chordRootID = chordRootDic[chord_arr[0]]\n chordAttrID = chordAttrDic[chord_arr[1]]\n chordRootID = torch.tensor([chordRootID]).to(get_device())\n chordAttrID = torch.tensor([chordAttrID]).to(get_device())\n gen_seq_root[:, cur_i] = chordRootID\n gen_seq_attr[:, cur_i] = chordAttrID\n \n # Let the transformer decide to end if it wants to\n if(next_token == CHORD_END):\n print(\"Model called end of sequence at:\", cur_i, \"/\", target_seq_length)\n break\n cur_i += 1\n if(cur_i % 50 == 0):\n print(cur_i, \"/\", target_seq_length)\n return gen_seq[:, :cur_i]" }, { "identifier": "SmoothCrossEntropyLoss", "path": "model/loss.py", "snippet": "class SmoothCrossEntropyLoss(_Loss):\n \"\"\"\n https://arxiv.org/abs/1512.00567\n \"\"\"\n __constants__ = ['label_smoothing', 'vocab_size', 'ignore_index', 'reduction']\n\n def __init__(self, label_smoothing, vocab_size, ignore_index=-100, reduction='mean', is_logits=True):\n assert 0.0 <= label_smoothing <= 1.0\n super().__init__(reduction=reduction)\n\n self.label_smoothing = label_smoothing\n self.vocab_size = vocab_size\n self.ignore_index = ignore_index\n self.input_is_logits = is_logits\n\n def forward(self, input, target):\n \"\"\"\n Args:\n input: [B * T, V]\n target: [B * T]\n Returns:\n cross entropy: [1]\n \"\"\"\n mask = (target == self.ignore_index).unsqueeze(-1)\n q = F.one_hot(target.long(), self.vocab_size).type(torch.float32)\n u = 1.0 / self.vocab_size\n q_prime = (1.0 - self.label_smoothing) * q + self.label_smoothing * u\n q_prime = q_prime.masked_fill(mask, 0)\n\n ce = self.cross_entropy_with_logits(q_prime, input)\n if self.reduction == 'mean':\n lengths = torch.sum(target != self.ignore_index)\n return ce.sum() / lengths\n elif self.reduction == 'sum':\n return ce.sum()\n else:\n raise NotImplementedError\n\n def cross_entropy_with_logits(self, p, q):\n return -torch.sum(p * (q - q.logsumexp(dim=-1, keepdim=True)), dim=-1)" }, { "identifier": "get_device", "path": "utilities/device.py", "snippet": "def get_device():\n \"\"\"\n ----------\n Author: Damon Gwinn\n ----------\n Grabs the default device. 
Default device is CUDA if available and use_cuda is not False, CPU otherwise.\n ----------\n \"\"\"\n\n if((not USE_CUDA) or (TORCH_CUDA_DEVICE is None)):\n return TORCH_CPU_DEVICE\n else:\n return TORCH_CUDA_DEVICE" }, { "identifier": "use_cuda", "path": "utilities/device.py", "snippet": "def use_cuda(cuda_bool):\n \"\"\"\n ----------\n Author: Damon Gwinn\n ----------\n Sets whether to use CUDA (if available), or use the CPU (not recommended)\n ----------\n \"\"\"\n\n global USE_CUDA\n USE_CUDA = cuda_bool" }, { "identifier": "LrStepTracker", "path": "utilities/lr_scheduling.py", "snippet": "class LrStepTracker:\n \"\"\"\n ----------\n Author: Ryan Marshall\n Modified: Damon Gwinn\n ----------\n Class for custom learn rate scheduler (to be used by torch.optim.lr_scheduler.LambdaLR).\n\n Learn rate for each step (batch) given the warmup steps is:\n lr = [ 1/sqrt(d_model) ] * min[ 1/sqrt(step) , step * (warmup_steps)^-1.5 ]\n\n This is from Attention is All you Need (https://arxiv.org/abs/1706.03762)\n ----------\n \"\"\"\n\n def __init__(self, model_dim=512, warmup_steps=4000, init_steps=0):\n # Store Values\n self.warmup_steps = warmup_steps\n self.model_dim = model_dim\n self.init_steps = init_steps\n\n # Begin Calculations\n self.invsqrt_dim = (1 / math.sqrt(model_dim))\n self.invsqrt_warmup = (1 / (warmup_steps * math.sqrt(warmup_steps)))\n\n # step\n def step(self, step):\n \"\"\"\n ----------\n Author: Ryan Marshall\n Modified: Damon Gwinn\n ----------\n Method to pass to LambdaLR. Increments the step and computes the new learn rate.\n ----------\n \"\"\"\n\n step += self.init_steps\n if(step <= self.warmup_steps):\n return self.invsqrt_dim * self.invsqrt_warmup * step\n else:\n invsqrt_step = (1 / math.sqrt(step))\n return self.invsqrt_dim * invsqrt_step" }, { "identifier": "get_lr", "path": "utilities/lr_scheduling.py", "snippet": "def get_lr(optimizer):\n \"\"\"\n ----------\n Author: Damon Gwinn\n ----------\n Hack to get the current learn rate of the model\n ----------\n \"\"\"\n\n for param_group in optimizer.param_groups:\n return param_group['lr']" }, { "identifier": "parse_train_args", "path": "utilities/argument_funcs.py", "snippet": "def parse_train_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-dataset_dir\", type=str, default=\"./dataset/\", help=\"Folder of VEVO dataset\")\n \n parser.add_argument(\"-input_dir_music\", type=str, default=\"./dataset/vevo_chord/\" + MUSIC_TYPE, help=\"Folder of video CNN feature files\")\n parser.add_argument(\"-input_dir_video\", type=str, default=\"./dataset/vevo_vis\", help=\"Folder of video CNN feature files\")\n\n parser.add_argument(\"-output_dir\", type=str, default=\"./saved_models\", help=\"Folder to save model weights. 
Saves one every epoch\")\n \n parser.add_argument(\"-weight_modulus\", type=int, default=1, help=\"How often to save epoch weights (ex: value of 10 means save every 10 epochs)\")\n parser.add_argument(\"-print_modulus\", type=int, default=1, help=\"How often to print train results for a batch (batch loss, learn rate, etc.)\")\n parser.add_argument(\"-n_workers\", type=int, default=1, help=\"Number of threads for the dataloader\")\n parser.add_argument(\"--force_cpu\", action=\"store_true\", help=\"Forces model to run on a cpu even when gpu is available\")\n parser.add_argument(\"--no_tensorboard\", action=\"store_true\", help=\"Turns off tensorboard result reporting\")\n parser.add_argument(\"-continue_weights\", type=str, default=None, help=\"Model weights to continue training based on\")\n parser.add_argument(\"-continue_epoch\", type=int, default=None, help=\"Epoch the continue_weights model was at\")\n parser.add_argument(\"-lr\", type=float, default=None, help=\"Constant learn rate. Leave as None for a custom scheduler.\")\n parser.add_argument(\"-ce_smoothing\", type=float, default=None, help=\"Smoothing parameter for smoothed cross entropy loss (defaults to no smoothing)\")\n parser.add_argument(\"-batch_size\", type=int, default=1, help=\"Batch size to use\")\n parser.add_argument(\"-epochs\", type=int, default=5, help=\"Number of epochs to use\")\n\n parser.add_argument(\"-max_sequence_midi\", type=int, default=2048, help=\"Maximum midi sequence to consider\")\n parser.add_argument(\"-max_sequence_video\", type=int, default=300, help=\"Maximum video sequence to consider\")\n parser.add_argument(\"-max_sequence_chord\", type=int, default=300, help=\"Maximum video sequence to consider\")\n\n parser.add_argument(\"-n_layers\", type=int, default=6, help=\"Number of decoder layers to use\")\n parser.add_argument(\"-num_heads\", type=int, default=8, help=\"Number of heads to use for multi-head attention\")\n parser.add_argument(\"-d_model\", type=int, default=512, help=\"Dimension of the model (output dim of embedding layers, etc.)\")\n parser.add_argument(\"-dim_feedforward\", type=int, default=1024, help=\"Dimension of the feedforward layer\")\n parser.add_argument(\"-dropout\", type=float, default=0.1, help=\"Dropout rate\")\n\n parser.add_argument(\"-is_video\", type=bool, default=IS_VIDEO, help=\"MusicTransformer or VideoMusicTransformer\")\n\n if IS_VIDEO:\n parser.add_argument(\"-vis_models\", type=str, default=VIS_MODELS_SORTED, help=\"...\")\n else:\n parser.add_argument(\"-vis_models\", type=str, default=\"\", help=\"...\")\n\n parser.add_argument(\"-emo_model\", type=str, default=\"6c_l14p\", help=\"...\")\n parser.add_argument(\"-rpr\", type=bool, default=RPR, help=\"...\")\n return parser.parse_args()" }, { "identifier": "print_train_args", "path": "utilities/argument_funcs.py", "snippet": "def print_train_args(args):\n print(SEPERATOR)\n \n print(\"dataset_dir:\", args.dataset_dir )\n \n print(\"input_dir_music:\", args.input_dir_music)\n print(\"input_dir_video:\", args.input_dir_video)\n\n print(\"output_dir:\", args.output_dir)\n\n print(\"weight_modulus:\", args.weight_modulus)\n print(\"print_modulus:\", args.print_modulus)\n print(\"\")\n print(\"n_workers:\", args.n_workers)\n print(\"force_cpu:\", args.force_cpu)\n print(\"tensorboard:\", not args.no_tensorboard)\n print(\"\")\n print(\"continue_weights:\", args.continue_weights)\n print(\"continue_epoch:\", args.continue_epoch)\n print(\"\")\n print(\"lr:\", args.lr)\n print(\"ce_smoothing:\", args.ce_smoothing)\n 
print(\"batch_size:\", args.batch_size)\n print(\"epochs:\", args.epochs)\n print(\"\")\n print(\"rpr:\", args.rpr)\n\n print(\"max_sequence_midi:\", args.max_sequence_midi)\n print(\"max_sequence_video:\", args.max_sequence_video)\n print(\"max_sequence_chord:\", args.max_sequence_chord)\n \n print(\"n_layers:\", args.n_layers)\n print(\"num_heads:\", args.num_heads)\n print(\"d_model:\", args.d_model)\n print(\"\")\n print(\"dim_feedforward:\", args.dim_feedforward)\n print(\"dropout:\", args.dropout)\n print(\"is_video:\", args.is_video)\n\n print(SEPERATOR)\n print(\"\")" }, { "identifier": "write_model_params", "path": "utilities/argument_funcs.py", "snippet": "def write_model_params(args, output_file):\n o_stream = open(output_file, \"w\")\n\n o_stream.write(\"rpr: \" + str(args.rpr) + \"\\n\")\n o_stream.write(\"lr: \" + str(args.lr) + \"\\n\")\n o_stream.write(\"ce_smoothing: \" + str(args.ce_smoothing) + \"\\n\")\n o_stream.write(\"batch_size: \" + str(args.batch_size) + \"\\n\")\n\n o_stream.write(\"max_sequence_midi: \" + str(args.max_sequence_midi) + \"\\n\")\n o_stream.write(\"max_sequence_video: \" + str(args.max_sequence_video) + \"\\n\")\n o_stream.write(\"max_sequence_chord: \" + str(args.max_sequence_chord) + \"\\n\")\n \n o_stream.write(\"n_layers: \" + str(args.n_layers) + \"\\n\")\n o_stream.write(\"num_heads: \" + str(args.num_heads) + \"\\n\")\n o_stream.write(\"d_model: \" + str(args.d_model) + \"\\n\")\n o_stream.write(\"dim_feedforward: \" + str(args.dim_feedforward) + \"\\n\")\n o_stream.write(\"dropout: \" + str(args.dropout) + \"\\n\")\n\n o_stream.write(\"is_video: \" + str(args.is_video) + \"\\n\")\n o_stream.write(\"vis_models: \" + str(args.vis_models) + \"\\n\")\n o_stream.write(\"input_dir_music: \" + str(args.input_dir_music) + \"\\n\")\n o_stream.write(\"input_dir_video: \" + str(args.input_dir_video) + \"\\n\")\n\n o_stream.close()" }, { "identifier": "train_epoch", "path": "utilities/run_model_vevo.py", "snippet": "def train_epoch(cur_epoch, model, dataloader, \n train_loss_func, train_loss_emotion_func,\n opt, lr_scheduler=None, print_modulus=1, isVideo=True):\n \n loss_chord = -1\n loss_emotion = -1\n model.train()\n for batch_num, batch in enumerate(dataloader):\n time_before = time.time()\n opt.zero_grad()\n\n x = batch[\"x\"].to(get_device())\n tgt = batch[\"tgt\"].to(get_device())\n x_root = batch[\"x_root\"].to(get_device())\n tgt_root = batch[\"tgt_root\"].to(get_device())\n x_attr = batch[\"x_attr\"].to(get_device())\n tgt_attr = batch[\"tgt_attr\"].to(get_device())\n tgt_emotion = batch[\"tgt_emotion\"].to(get_device())\n tgt_emotion_prob = batch[\"tgt_emotion_prob\"].to(get_device())\n \n feature_semantic_list = [] \n for feature_semantic in batch[\"semanticList\"]:\n feature_semantic_list.append( feature_semantic.to(get_device()) )\n\n feature_key = batch[\"key\"].to(get_device())\n feature_scene_offset = batch[\"scene_offset\"].to(get_device())\n feature_motion = batch[\"motion\"].to(get_device())\n feature_emotion = batch[\"emotion\"].to(get_device())\n\n if isVideo:\n # use VideoMusicTransformer\n if IS_SEPERATED:\n y_root, y_attr = model(x,\n x_root,\n x_attr,\n feature_semantic_list, \n feature_key, \n feature_scene_offset,\n feature_motion,\n feature_emotion)\n \n y_root = y_root.reshape(y_root.shape[0] * y_root.shape[1], -1)\n y_attr = y_attr.reshape(y_attr.shape[0] * y_attr.shape[1], -1)\n \n tgt_root = tgt_root.flatten()\n tgt_attr = tgt_attr.flatten()\n\n tgt_emotion = tgt_emotion.squeeze()\n\n loss_chord_root = 
train_loss_func.forward(y_root, tgt_root)\n loss_chord_attr = train_loss_func.forward(y_attr, tgt_attr)\n loss_chord = loss_chord_root + loss_chord_attr\n\n first_14 = tgt_emotion[:, :14]\n last_2 = tgt_emotion[:, -2:]\n tgt_emotion_attr = torch.cat((first_14, last_2), dim=1)\n\n loss_emotion = train_loss_emotion_func.forward(y_attr, tgt_emotion_attr)\n\n total_loss = LOSS_LAMBDA * loss_chord + (1-LOSS_LAMBDA) * loss_emotion\n total_loss.backward()\n opt.step()\n if(lr_scheduler is not None):\n lr_scheduler.step()\n \n else:\n #videomusic tran nosep\n y = model(x,\n x_root,\n x_attr,\n feature_semantic_list, \n feature_key, \n feature_scene_offset,\n feature_motion,\n feature_emotion)\n \n y = y.reshape(y.shape[0] * y.shape[1], -1)\n tgt = tgt.flatten()\n tgt_emotion = tgt_emotion.squeeze()\n loss_chord = train_loss_func.forward(y, tgt)\n loss_emotion = train_loss_emotion_func.forward(y, tgt_emotion)\n total_loss = LOSS_LAMBDA * loss_chord + (1-LOSS_LAMBDA) * loss_emotion\n total_loss.backward()\n opt.step()\n if(lr_scheduler is not None):\n lr_scheduler.step()\n\n else:\n # music transformer\n if IS_SEPERATED:\n y_root, y_attr = model(x,\n x_root,\n x_attr,\n feature_key)\n \n y_root = y_root.reshape(y_root.shape[0] * y_root.shape[1], -1)\n y_attr = y_attr.reshape(y_attr.shape[0] * y_attr.shape[1], -1)\n \n tgt_root = tgt_root.flatten()\n tgt_attr = tgt_attr.flatten()\n\n tgt_emotion = tgt_emotion.squeeze()\n\n loss_chord_root = train_loss_func.forward(y_root, tgt_root)\n loss_chord_attr = train_loss_func.forward(y_attr, tgt_attr)\n\n loss_chord = loss_chord_root + loss_chord_attr\n loss_emotion = -1\n \n total_loss = loss_chord\n total_loss.backward()\n opt.step()\n if(lr_scheduler is not None):\n lr_scheduler.step()\n else:\n # use MusicTransformer (no sep)\n y = model(x,\n x_root,\n x_attr,\n feature_key)\n \n y = y.reshape(y.shape[0] * y.shape[1], -1)\n tgt = tgt.flatten()\n\n loss_chord = train_loss_func.forward(y, tgt)\n loss_emotion = -1\n\n total_loss = loss_chord\n total_loss.backward()\n\n opt.step()\n\n if(lr_scheduler is not None):\n lr_scheduler.step()\n\n time_after = time.time()\n time_took = time_after - time_before\n \n if((batch_num+1) % print_modulus == 0):\n print(SEPERATOR)\n print(\"Epoch\", cur_epoch, \" Batch\", batch_num+1, \"/\", len(dataloader))\n print(\"LR:\", get_lr(opt))\n print(\"Train loss (total):\", float(total_loss))\n print(\"Train loss (chord):\", float(loss_chord))\n print(\"Train loss (emotion):\", float(loss_emotion))\n print(\"\")\n print(\"Time (s):\", time_took)\n print(SEPERATOR)\n print(\"\")\n return" }, { "identifier": "eval_model", "path": "utilities/run_model_vevo.py", "snippet": "def eval_model(model, dataloader, \n eval_loss_func, eval_loss_emotion_func,\n isVideo = True, isGenConfusionMatrix=False):\n model.eval()\n avg_acc = -1\n avg_cor = -1\n avg_acc_cor = -1\n\n avg_h1 = -1\n avg_h3 = -1\n avg_h5 = -1\n \n avg_loss_chord = -1\n avg_loss_emotion = -1\n avg_total_loss = -1\n\n true_labels = []\n true_root_labels = []\n true_attr_labels = []\n \n pred_labels = []\n pred_root_labels = []\n pred_attr_labels = []\n \n with torch.set_grad_enabled(False):\n n_test = len(dataloader)\n n_test_cor = 0 \n\n sum_loss_chord = 0.0\n sum_loss_emotion = 0.0\n sum_total_loss = 0.0\n\n sum_acc = 0.0\n sum_cor = 0.0\n\n sum_h1 = 0.0\n sum_h3 = 0.0\n sum_h5 = 0.0\n \n for batch in dataloader:\n x = batch[\"x\"].to(get_device())\n tgt = batch[\"tgt\"].to(get_device())\n x_root = batch[\"x_root\"].to(get_device())\n tgt_root = 
batch[\"tgt_root\"].to(get_device())\n x_attr = batch[\"x_attr\"].to(get_device())\n tgt_attr = batch[\"tgt_attr\"].to(get_device())\n tgt_emotion = batch[\"tgt_emotion\"].to(get_device())\n tgt_emotion_prob = batch[\"tgt_emotion_prob\"].to(get_device())\n \n feature_semantic_list = [] \n for feature_semantic in batch[\"semanticList\"]:\n feature_semantic_list.append( feature_semantic.to(get_device()) )\n \n feature_key = batch[\"key\"].to(get_device())\n feature_scene_offset = batch[\"scene_offset\"].to(get_device())\n feature_motion = batch[\"motion\"].to(get_device())\n feature_emotion = batch[\"emotion\"].to(get_device())\n\n if isVideo:\n if IS_SEPERATED:\n y_root, y_attr = model(x,\n x_root,\n x_attr,\n feature_semantic_list, \n feature_key, \n feature_scene_offset,\n feature_motion,\n feature_emotion)\n\n sum_acc += float(compute_vevo_accuracy_root_attr(y_root, y_attr, tgt))\n cor = float(compute_vevo_correspondence_root_attr(y_root, y_attr, tgt, tgt_emotion, tgt_emotion_prob, EMOTION_THRESHOLD))\n if cor >= 0 :\n n_test_cor +=1\n sum_cor += cor\n\n sum_h1 += float(compute_hits_k_root_attr(y_root, y_attr, tgt,1))\n sum_h3 += float(compute_hits_k_root_attr(y_root, y_attr, tgt,3))\n sum_h5 += float(compute_hits_k_root_attr(y_root, y_attr, tgt,5))\n \n y_root = y_root.reshape(y_root.shape[0] * y_root.shape[1], -1)\n y_attr = y_attr.reshape(y_attr.shape[0] * y_attr.shape[1], -1)\n \n tgt_root = tgt_root.flatten()\n tgt_attr = tgt_attr.flatten()\n tgt_emotion = tgt_emotion.squeeze()\n\n loss_chord_root = eval_loss_func.forward(y_root, tgt_root)\n loss_chord_attr = eval_loss_func.forward(y_attr, tgt_attr)\n loss_chord = loss_chord_root + loss_chord_attr\n\n first_14 = tgt_emotion[:, :14]\n last_2 = tgt_emotion[:, -2:]\n tgt_emotion_attr = torch.cat((first_14, last_2), dim=1)\n\n loss_emotion = eval_loss_emotion_func.forward(y_attr, tgt_emotion_attr)\n total_loss = LOSS_LAMBDA * loss_chord + (1-LOSS_LAMBDA) * loss_emotion\n\n sum_loss_chord += float(loss_chord)\n sum_loss_emotion += float(loss_emotion)\n sum_total_loss += float(total_loss)\n else:\n y= model(x,\n x_root,\n x_attr,\n feature_semantic_list, \n feature_key, \n feature_scene_offset,\n feature_motion,\n feature_emotion)\n \n sum_acc += float(compute_vevo_accuracy(y, tgt ))\n cor = float(compute_vevo_correspondence(y, tgt, tgt_emotion, tgt_emotion_prob, EMOTION_THRESHOLD))\n if cor >= 0 :\n n_test_cor +=1\n sum_cor += cor\n\n sum_h1 += float(compute_hits_k(y, tgt,1))\n sum_h3 += float(compute_hits_k(y, tgt,3))\n sum_h5 += float(compute_hits_k(y, tgt,5))\n \n y = y.reshape(y.shape[0] * y.shape[1], -1)\n\n tgt = tgt.flatten()\n tgt_root = tgt_root.flatten()\n tgt_attr = tgt_attr.flatten()\n \n tgt_emotion = tgt_emotion.squeeze()\n\n loss_chord = eval_loss_func.forward(y, tgt)\n loss_emotion = eval_loss_emotion_func.forward(y, tgt_emotion)\n total_loss = LOSS_LAMBDA * loss_chord + (1-LOSS_LAMBDA) * loss_emotion\n\n sum_loss_chord += float(loss_chord)\n sum_loss_emotion += float(loss_emotion)\n sum_total_loss += float(total_loss)\n\n if isGenConfusionMatrix:\n pred = y.argmax(dim=1).detach().cpu().numpy()\n pred_root = []\n pred_attr = []\n\n for i in pred:\n if i == 0:\n pred_root.append(0)\n pred_attr.append(0)\n elif i == 157:\n pred_root.append(CHORD_ROOT_END)\n pred_attr.append(CHORD_ATTR_END)\n elif i == 158:\n pred_root.append(CHORD_ROOT_PAD)\n pred_attr.append(CHORD_ATTR_PAD)\n else:\n rootindex = int( (i-1)/13 ) + 1\n attrindex = (i-1)%13 + 1\n pred_root.append(rootindex)\n pred_attr.append(attrindex)\n \n pred_root = 
np.array(pred_root)\n pred_attr = np.array(pred_attr)\n\n true = tgt.detach().cpu().numpy()\n true_root = tgt_root.detach().cpu().numpy()\n true_attr = tgt_attr.detach().cpu().numpy()\n \n pred_labels.extend(pred)\n pred_root_labels.extend(pred_root)\n pred_attr_labels.extend(pred_attr)\n \n true_labels.extend(true)\n true_root_labels.extend(true_root)\n true_attr_labels.extend(true_attr)\n else:\n if IS_SEPERATED:\n y_root, y_attr = model(x,\n x_root,\n x_attr,\n feature_key)\n\n sum_acc += float(compute_vevo_accuracy_root_attr(y_root, y_attr, tgt))\n cor = float(compute_vevo_correspondence_root_attr(y_root, y_attr, tgt, tgt_emotion, tgt_emotion_prob, EMOTION_THRESHOLD))\n if cor >= 0 :\n n_test_cor +=1\n sum_cor += cor\n\n sum_h1 += float(compute_hits_k_root_attr(y_root, y_attr, tgt,1))\n sum_h3 += float(compute_hits_k_root_attr(y_root, y_attr, tgt,3))\n sum_h5 += float(compute_hits_k_root_attr(y_root, y_attr, tgt,5))\n \n y_root = y_root.reshape(y_root.shape[0] * y_root.shape[1], -1)\n y_attr = y_attr.reshape(y_attr.shape[0] * y_attr.shape[1], -1)\n \n tgt_root = tgt_root.flatten()\n tgt_attr = tgt_attr.flatten()\n tgt_emotion = tgt_emotion.squeeze()\n\n loss_chord_root = eval_loss_func.forward(y_root, tgt_root)\n loss_chord_attr = eval_loss_func.forward(y_attr, tgt_attr)\n loss_chord = loss_chord_root + loss_chord_attr\n\n first_14 = tgt_emotion[:, :14]\n last_2 = tgt_emotion[:, -2:]\n tgt_emotion_attr = torch.cat((first_14, last_2), dim=1)\n loss_emotion = eval_loss_emotion_func.forward(y_attr, tgt_emotion_attr)\n \n total_loss = LOSS_LAMBDA * loss_chord + (1-LOSS_LAMBDA) * loss_emotion\n\n sum_loss_chord += float(loss_chord)\n sum_loss_emotion += float(loss_emotion)\n sum_total_loss += float(total_loss)\n else:\n # use MusicTransformer no sep\n y = model(x,\n x_root,\n x_attr,\n feature_key)\n \n sum_acc += float(compute_vevo_accuracy(y, tgt ))\n cor = float(compute_vevo_correspondence(y, tgt, tgt_emotion, tgt_emotion_prob, EMOTION_THRESHOLD))\n \n if cor >= 0 :\n n_test_cor +=1\n sum_cor += cor\n\n sum_h1 += float(compute_hits_k(y, tgt,1))\n sum_h3 += float(compute_hits_k(y, tgt,3))\n sum_h5 += float(compute_hits_k(y, tgt,5))\n\n tgt_emotion = tgt_emotion.squeeze()\n \n y = y.reshape(y.shape[0] * y.shape[1], -1)\n tgt = tgt.flatten()\n loss_chord = eval_loss_func.forward(y, tgt)\n loss_emotion = eval_loss_emotion_func.forward(y, tgt_emotion)\n total_loss = loss_chord\n\n sum_loss_chord += float(loss_chord)\n sum_loss_emotion += float(loss_emotion)\n sum_total_loss += float(total_loss)\n\n avg_loss_chord = sum_loss_chord / n_test\n avg_loss_emotion = sum_loss_emotion / n_test\n avg_total_loss = sum_total_loss / n_test\n\n avg_acc = sum_acc / n_test\n avg_cor = sum_cor / n_test_cor\n \n avg_h1 = sum_h1 / n_test\n avg_h3 = sum_h3 / n_test\n avg_h5 = sum_h5 / n_test\n \n avg_acc_cor = (avg_acc + avg_cor)/ 2.0\n\n if isGenConfusionMatrix:\n chordInvDicPath = \"./dataset/vevo_meta/chord_inv.json\"\n chordRootInvDicPath = \"./dataset/vevo_meta/chord_root_inv.json\"\n chordAttrInvDicPath = \"./dataset/vevo_meta/chord_attr_inv.json\"\n \n with open(chordInvDicPath) as json_file:\n chordInvDic = json.load(json_file)\n with open(chordRootInvDicPath) as json_file:\n chordRootInvDic = json.load(json_file)\n with open(chordAttrInvDicPath) as json_file:\n chordAttrInvDic = json.load(json_file)\n\n # Confusion matrix (CHORD)\n topChordList = []\n with open(\"./dataset/vevo_meta/top_chord.txt\", encoding = 'utf-8') as f:\n for line in f:\n line = line.strip()\n line_arr = line.split(\" \")\n if 
len(line_arr) == 3 :\n chordID = line_arr[1]\n topChordList.append( int(chordID) )\n topChordList = np.array(topChordList)\n topChordList = topChordList[:10]\n mask = np.isin(true_labels, topChordList)\n true_labels = np.array(true_labels)[mask]\n pred_labels = np.array(pred_labels)[mask]\n\n conf_matrix = confusion_matrix(true_labels, pred_labels, labels=topChordList)\n label_names = [ chordInvDic[str(label_id)] for label_id in topChordList ]\n \n plt.figure(figsize=(8, 6))\n plt.imshow(conf_matrix, cmap=plt.cm.Blues)\n plt.title(\"Confusion Matrix\")\n plt.colorbar()\n tick_marks = np.arange(len(topChordList))\n plt.xticks(tick_marks, label_names, rotation=45)\n plt.yticks(tick_marks, label_names)\n thresh = conf_matrix.max() / 2.0\n for i in range(conf_matrix.shape[0]):\n for j in range(conf_matrix.shape[1]):\n plt.text(j, i, format(conf_matrix[i, j], 'd'),\n ha=\"center\", va=\"center\",\n color=\"white\" if conf_matrix[i, j] > thresh else \"black\")\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n plt.savefig(\"confusion_matrix.png\")\n plt.show()\n\n # Confusion matrix (CHORD ROOT) \n chordRootList = np.arange(1, 13)\n conf_matrix = confusion_matrix(true_root_labels, pred_root_labels, labels= chordRootList )\n \n label_names = [ chordRootInvDic[str(label_id)] for label_id in chordRootList ]\n \n plt.figure(figsize=(8, 6))\n plt.imshow(conf_matrix, cmap=plt.cm.Blues)\n plt.title(\"Confusion Matrix (Chord root)\")\n plt.colorbar()\n tick_marks = np.arange(len(chordRootList))\n plt.xticks(tick_marks, label_names, rotation=45)\n plt.yticks(tick_marks, label_names)\n thresh = conf_matrix.max() / 2.0\n for i in range(conf_matrix.shape[0]):\n for j in range(conf_matrix.shape[1]):\n plt.text(j, i, format(conf_matrix[i, j], 'd'),\n ha=\"center\", va=\"center\",\n color=\"white\" if conf_matrix[i, j] > thresh else \"black\")\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n plt.savefig(\"confusion_matrix_root.png\")\n plt.show()\n\n # Confusion matrix (CHORD ATTR)\n chordAttrList = np.arange(1, 14)\n conf_matrix = confusion_matrix(true_attr_labels, pred_attr_labels, labels= chordAttrList )\n \n label_names = [ chordAttrInvDic[str(label_id)] for label_id in chordAttrList ]\n \n plt.figure(figsize=(8, 6))\n plt.imshow(conf_matrix, cmap=plt.cm.Blues)\n plt.title(\"Confusion Matrix (Chord quality)\")\n plt.colorbar()\n tick_marks = np.arange(len(chordAttrList))\n plt.xticks(tick_marks, label_names, rotation=45)\n plt.yticks(tick_marks, label_names)\n thresh = conf_matrix.max() / 2.0\n for i in range(conf_matrix.shape[0]):\n for j in range(conf_matrix.shape[1]):\n plt.text(j, i, format(conf_matrix[i, j], 'd'),\n ha=\"center\", va=\"center\",\n color=\"white\" if conf_matrix[i, j] > thresh else \"black\")\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n plt.savefig(\"confusion_matrix_quality.png\")\n plt.show()\n\n return { \"avg_total_loss\" : avg_total_loss, \n \"avg_loss_chord\" : avg_loss_chord, \n \"avg_loss_emotion\": avg_loss_emotion, \n \"avg_acc\" : avg_acc, \n \"avg_cor\" : avg_cor, \n \"avg_acc_cor\" : avg_acc_cor, \n \"avg_h1\" : avg_h1, \n \"avg_h3\" : avg_h3,\n \"avg_h5\" : avg_h5 }" } ]
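The SmoothCrossEntropyLoss snippet in the context list above mixes the one-hot target with a uniform distribution, q' = (1 - eps) * one_hot(target) + eps / vocab_size, before taking cross entropy against the logits. The toy check below is a minimal sketch with made-up tensors (not part of any dataset row): it reproduces that smoothing rule by hand and compares it against PyTorch's built-in label_smoothing, which uses the same uniform mixture, so the two values should agree up to floating-point error when no ignore_index is involved.

import torch
import torch.nn.functional as F

# Toy tensors (illustrative only): 4 positions, vocabulary of 5 classes.
eps, vocab = 0.1, 5
logits = torch.randn(4, vocab)
target = torch.tensor([0, 2, 4, 1])

# Smoothing rule mirrored from SmoothCrossEntropyLoss.forward above.
q = F.one_hot(target, vocab).float()
q_prime = (1.0 - eps) * q + eps / vocab
manual = -(q_prime * (logits - logits.logsumexp(dim=-1, keepdim=True))).sum(dim=-1).mean()

# PyTorch's built-in label smoothing applies the same mixture to the target.
builtin = F.cross_entropy(logits, target, label_smoothing=eps)
print(float(manual), float(builtin))  # should match up to floating-point error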
import os import csv import shutil import torch import torch.nn as nn from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from torch.optim import Adam from dataset.vevo_dataset import compute_vevo_accuracy, create_vevo_datasets from model.music_transformer import MusicTransformer from model.video_music_transformer import VideoMusicTransformer from model.loss import SmoothCrossEntropyLoss from utilities.constants import * from utilities.device import get_device, use_cuda from utilities.lr_scheduling import LrStepTracker, get_lr from utilities.argument_funcs import parse_train_args, print_train_args, write_model_params from utilities.run_model_vevo import train_epoch, eval_model from torch.utils.tensorboard import SummaryWriter
13,853
CSV_HEADER = ["Epoch", "Learn rate", "Avg Train loss (total)", "Avg Train loss (chord)", "Avg Train loss (emotion)", "Avg Eval loss (total)", "Avg Eval loss (chord)", "Avg Eval loss (emotion)"] BASELINE_EPOCH = -1 version = VERSION split_ver = SPLIT_VER split_path = "split_" + split_ver VIS_MODELS_ARR = [ "2d/clip_l14p" ] # main def main( vm = "" , isPrintArgs = True ): args = parse_train_args() if isPrintArgs: print_train_args(args) if vm != "": args.vis_models = vm if args.is_video: vis_arr = args.vis_models.split(" ") vis_arr.sort() vis_abbr_path = "" for v in vis_arr: vis_abbr_path = vis_abbr_path + "_" + VIS_ABBR_DIC[v] vis_abbr_path = vis_abbr_path[1:] else: vis_abbr_path = "no_video" if(args.force_cpu): use_cuda(False) print("WARNING: Forced CPU usage, expect model to perform slower") print("") os.makedirs( args.output_dir, exist_ok=True) os.makedirs( os.path.join( args.output_dir, version), exist_ok=True) ##### Output prep ##### params_file = os.path.join(args.output_dir, version, "model_params.txt") write_model_params(args, params_file) weights_folder = os.path.join(args.output_dir, version, "weights") os.makedirs(weights_folder, exist_ok=True) results_folder = os.path.join(args.output_dir, version) os.makedirs(results_folder, exist_ok=True) results_file = os.path.join(results_folder, "results.csv") best_loss_file = os.path.join(results_folder, "best_loss_weights.pickle") best_text = os.path.join(results_folder, "best_epochs.txt") ##### Tensorboard ##### if(args.no_tensorboard): tensorboard_summary = None else: tensorboad_dir = os.path.join(args.output_dir, version, "tensorboard") tensorboard_summary = SummaryWriter(log_dir=tensorboad_dir) train_dataset, val_dataset, _ = create_vevo_datasets( dataset_root = "./dataset/", max_seq_chord = args.max_sequence_chord, max_seq_video = args.max_sequence_video, vis_models = args.vis_models, emo_model = args.emo_model, split_ver = SPLIT_VER, random_seq = True, is_video = args.is_video) total_vf_dim = 0 if args.is_video: for vf in train_dataset[0]["semanticList"]: total_vf_dim += vf.shape[1] total_vf_dim += 1 # Scene_offset total_vf_dim += 1 # Motion # Emotion if args.emo_model.startswith("6c"): total_vf_dim += 6 else: total_vf_dim += 5 train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.n_workers, shuffle=True) val_loader = DataLoader(val_dataset, batch_size=args.batch_size, num_workers=args.n_workers) if args.is_video: model = VideoMusicTransformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence_midi=args.max_sequence_midi, max_sequence_video=args.max_sequence_video, max_sequence_chord=args.max_sequence_chord, total_vf_dim=total_vf_dim, rpr=args.rpr).to(get_device()) else: model = MusicTransformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence_midi=args.max_sequence_midi, max_sequence_chord=args.max_sequence_chord, rpr=args.rpr).to(get_device()) start_epoch = BASELINE_EPOCH if(args.continue_weights is not None): if(args.continue_epoch is None): print("ERROR: Need epoch number to continue from (-continue_epoch) when using continue_weights") assert(False) else: model.load_state_dict(torch.load(args.continue_weights)) start_epoch = args.continue_epoch elif(args.continue_epoch is not None): print("ERROR: Need continue weights (-continue_weights) when using continue_epoch") assert(False) ##### Lr Scheduler vs static lr 
##### if(args.lr is None): if(args.continue_epoch is None): init_step = 0 else: init_step = args.continue_epoch * len(train_loader) lr = LR_DEFAULT_START
CSV_HEADER = ["Epoch", "Learn rate", "Avg Train loss (total)", "Avg Train loss (chord)", "Avg Train loss (emotion)", "Avg Eval loss (total)", "Avg Eval loss (chord)", "Avg Eval loss (emotion)"] BASELINE_EPOCH = -1 version = VERSION split_ver = SPLIT_VER split_path = "split_" + split_ver VIS_MODELS_ARR = [ "2d/clip_l14p" ] # main def main( vm = "" , isPrintArgs = True ): args = parse_train_args() if isPrintArgs: print_train_args(args) if vm != "": args.vis_models = vm if args.is_video: vis_arr = args.vis_models.split(" ") vis_arr.sort() vis_abbr_path = "" for v in vis_arr: vis_abbr_path = vis_abbr_path + "_" + VIS_ABBR_DIC[v] vis_abbr_path = vis_abbr_path[1:] else: vis_abbr_path = "no_video" if(args.force_cpu): use_cuda(False) print("WARNING: Forced CPU usage, expect model to perform slower") print("") os.makedirs( args.output_dir, exist_ok=True) os.makedirs( os.path.join( args.output_dir, version), exist_ok=True) ##### Output prep ##### params_file = os.path.join(args.output_dir, version, "model_params.txt") write_model_params(args, params_file) weights_folder = os.path.join(args.output_dir, version, "weights") os.makedirs(weights_folder, exist_ok=True) results_folder = os.path.join(args.output_dir, version) os.makedirs(results_folder, exist_ok=True) results_file = os.path.join(results_folder, "results.csv") best_loss_file = os.path.join(results_folder, "best_loss_weights.pickle") best_text = os.path.join(results_folder, "best_epochs.txt") ##### Tensorboard ##### if(args.no_tensorboard): tensorboard_summary = None else: tensorboad_dir = os.path.join(args.output_dir, version, "tensorboard") tensorboard_summary = SummaryWriter(log_dir=tensorboad_dir) train_dataset, val_dataset, _ = create_vevo_datasets( dataset_root = "./dataset/", max_seq_chord = args.max_sequence_chord, max_seq_video = args.max_sequence_video, vis_models = args.vis_models, emo_model = args.emo_model, split_ver = SPLIT_VER, random_seq = True, is_video = args.is_video) total_vf_dim = 0 if args.is_video: for vf in train_dataset[0]["semanticList"]: total_vf_dim += vf.shape[1] total_vf_dim += 1 # Scene_offset total_vf_dim += 1 # Motion # Emotion if args.emo_model.startswith("6c"): total_vf_dim += 6 else: total_vf_dim += 5 train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.n_workers, shuffle=True) val_loader = DataLoader(val_dataset, batch_size=args.batch_size, num_workers=args.n_workers) if args.is_video: model = VideoMusicTransformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence_midi=args.max_sequence_midi, max_sequence_video=args.max_sequence_video, max_sequence_chord=args.max_sequence_chord, total_vf_dim=total_vf_dim, rpr=args.rpr).to(get_device()) else: model = MusicTransformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence_midi=args.max_sequence_midi, max_sequence_chord=args.max_sequence_chord, rpr=args.rpr).to(get_device()) start_epoch = BASELINE_EPOCH if(args.continue_weights is not None): if(args.continue_epoch is None): print("ERROR: Need epoch number to continue from (-continue_epoch) when using continue_weights") assert(False) else: model.load_state_dict(torch.load(args.continue_weights)) start_epoch = args.continue_epoch elif(args.continue_epoch is not None): print("ERROR: Need continue weights (-continue_weights) when using continue_epoch") assert(False) ##### Lr Scheduler vs static lr 
##### if(args.lr is None): if(args.continue_epoch is None): init_step = 0 else: init_step = args.continue_epoch * len(train_loader) lr = LR_DEFAULT_START
lr_stepper = LrStepTracker(args.d_model, SCHEDULER_WARMUP_STEPS, init_step)
7
2023-10-13 09:06:24+00:00
16k
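For the row above, the next_line field (lr_stepper = LrStepTracker(...)) plugs the warmup schedule defined in the LrStepTracker snippet into torch.optim.lr_scheduler.LambdaLR. The sketch below shows that wiring in isolation; the model, the warmup length, and the base learning rate are stand-ins chosen for illustration (a plain nn.Linear, 4000 warmup steps, and a base LR of 1.0), not the repository's actual SCHEDULER_WARMUP_STEPS or LR_DEFAULT_START constants.

import math
import torch.nn as nn
from torch.optim import Adam
from torch.optim.lr_scheduler import LambdaLR

# Schedule copied from the LrStepTracker snippet above:
# lr = (1/sqrt(d_model)) * min(1/sqrt(step), step * warmup_steps**-1.5)
class LrStepTracker:
    def __init__(self, model_dim=512, warmup_steps=4000, init_steps=0):
        self.warmup_steps = warmup_steps
        self.init_steps = init_steps
        self.invsqrt_dim = 1 / math.sqrt(model_dim)
        self.invsqrt_warmup = 1 / (warmup_steps * math.sqrt(warmup_steps))

    def step(self, step):
        step += self.init_steps
        if step <= self.warmup_steps:
            return self.invsqrt_dim * self.invsqrt_warmup * step
        return self.invsqrt_dim / math.sqrt(step)

model = nn.Linear(512, 512)             # stand-in for the transformer
opt = Adam(model.parameters(), lr=1.0)  # base LR of 1.0 assumed, so LambdaLR emits the schedule directly
lr_stepper = LrStepTracker(512, 4000, init_steps=0)
scheduler = LambdaLR(opt, lr_stepper.step)

for _ in range(5):                      # one scheduler.step() per training batch
    opt.step()
    scheduler.step()
print(scheduler.get_last_lr())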
RobotLocomotion/gcs-science-robotics
reproduction/bimanual/helpers.py
[ { "identifier": "BezierGCS", "path": "gcs/bezier.py", "snippet": "class BezierGCS(BaseGCS):\n def __init__(self, regions, order, continuity, edges=None, hdot_min=1e-6, full_dim_overlap=False):\n BaseGCS.__init__(self, regions)\n\n self.order = order\n self.continuity = continuity\n assert continuity < order\n\n A_time = np.vstack((np.eye(order + 1), -np.eye(order + 1),\n np.eye(order, order + 1) - np.eye(order, order + 1, 1)))\n b_time = np.concatenate((1e3*np.ones(order + 1), np.zeros(order + 1), -hdot_min * np.ones(order)))\n self.time_scaling_set = HPolyhedron(A_time, b_time)\n\n for i, r in enumerate(self.regions):\n self.gcs.AddVertex(\n r.CartesianPower(order + 1).CartesianProduct(self.time_scaling_set),\n name = self.names[i] if not self.names is None else '')\n\n # Formulate edge costs and constraints\n u_control = MakeMatrixContinuousVariable(\n self.dimension, order + 1, \"xu\")\n v_control = MakeMatrixContinuousVariable(\n self.dimension, order + 1, \"xv\")\n u_duration = MakeVectorContinuousVariable(order + 1, \"Tu\")\n v_duration = MakeVectorContinuousVariable(order + 1, \"Tv\")\n\n self.u_vars = np.concatenate((u_control.flatten(\"F\"), u_duration))\n self.u_r_trajectory = BsplineTrajectory_[Expression](\n BsplineBasis_[Expression](order + 1, order + 1, KnotVectorType.kClampedUniform, 0., 1.),\n u_control)\n self.u_h_trajectory = BsplineTrajectory_[Expression](\n BsplineBasis_[Expression](order + 1, order + 1, KnotVectorType.kClampedUniform, 0., 1.),\n np.expand_dims(u_duration, 0))\n\n edge_vars = np.concatenate((u_control.flatten(\"F\"), u_duration, v_control.flatten(\"F\"), v_duration))\n v_r_trajectory = BsplineTrajectory_[Expression](\n BsplineBasis_[Expression](order + 1, order + 1, KnotVectorType.kClampedUniform, 0., 1.),\n v_control)\n v_h_trajectory = BsplineTrajectory_[Expression](\n BsplineBasis_[Expression](order + 1, order + 1, KnotVectorType.kClampedUniform, 0., 1.),\n np.expand_dims(v_duration, 0))\n\n # Continuity constraints\n self.contin_constraints = []\n for deriv in range(continuity + 1):\n u_path_deriv = self.u_r_trajectory.MakeDerivative(deriv)\n v_path_deriv = v_r_trajectory.MakeDerivative(deriv)\n path_continuity_error = v_path_deriv.control_points()[0] - u_path_deriv.control_points()[-1]\n self.contin_constraints.append(LinearEqualityConstraint(\n DecomposeLinearExpressions(path_continuity_error, edge_vars),\n np.zeros(self.dimension)))\n\n u_time_deriv = self.u_h_trajectory.MakeDerivative(deriv)\n v_time_deriv = v_h_trajectory.MakeDerivative(deriv)\n time_continuity_error = v_time_deriv.control_points()[0] - u_time_deriv.control_points()[-1]\n self.contin_constraints.append(LinearEqualityConstraint(\n DecomposeLinearExpressions(time_continuity_error, edge_vars), 0.0))\n\n self.deriv_constraints = []\n self.edge_costs = []\n\n # Add edges to graph and apply costs/constraints\n if edges is None:\n if full_dim_overlap:\n edges = self.findEdgesViaFullDimensionOverlaps()\n else:\n edges = self.findEdgesViaOverlaps()\n\n vertices = self.gcs.Vertices()\n for ii, jj in edges:\n u = vertices[ii]\n v = vertices[jj]\n edge = self.gcs.AddEdge(u, v, f\"({u.name()}, {v.name()})\")\n\n for c_con in self.contin_constraints:\n edge.AddConstraint(Binding[Constraint](\n c_con, np.append(u.x(), v.x())))\n\n def addTimeCost(self, weight):\n assert isinstance(weight, float) or isinstance(weight, int)\n\n u_time_control = self.u_h_trajectory.control_points()\n segment_time = u_time_control[-1] - u_time_control[0]\n time_cost = LinearCost(\n weight * 
DecomposeLinearExpressions(segment_time, self.u_vars)[0], 0.)\n self.edge_costs.append(time_cost)\n\n for edge in self.gcs.Edges():\n if edge.u() == self.source:\n continue\n edge.AddCost(Binding[Cost](time_cost, edge.xu()))\n\n def addPathLengthCost(self, weight):\n if isinstance(weight, float) or isinstance(weight, int):\n weight_matrix = weight * np.eye(self.dimension)\n else:\n assert(len(weight) == self.dimension)\n weight_matrix = np.diag(weight)\n\n u_path_control = self.u_r_trajectory.MakeDerivative(1).control_points()\n for ii in range(len(u_path_control)):\n H = DecomposeLinearExpressions(u_path_control[ii] / self.order, self.u_vars)\n path_cost = L2NormCost(np.matmul(weight_matrix, H), np.zeros(self.dimension))\n self.edge_costs.append(path_cost)\n\n for edge in self.gcs.Edges():\n if edge.u() == self.source:\n continue\n edge.AddCost(Binding[Cost](path_cost, edge.xu()))\n\n def addPathLengthIntegralCost(self, weight, integration_points=100):\n if isinstance(weight, float) or isinstance(weight, int):\n weight_matrix = weight * np.eye(self.dimension)\n else:\n assert(len(weight) == self.dimension)\n weight_matrix = np.diag(weight)\n\n s_points = np.linspace(0., 1., integration_points + 1)\n u_path_deriv = self.u_r_trajectory.MakeDerivative(1)\n\n if u_path_deriv.basis().order() == 1:\n for t in [0.0, 1.0]:\n q_ds = u_path_deriv.value(t)\n costs = []\n for ii in range(self.dimension):\n costs.append(q_ds[ii])\n H = DecomposeLinearExpressions(costs, self.u_vars)\n integral_cost = L2NormCost(np.matmul(weight_matrix, H), np.zeros(self.dimension))\n self.edge_costs.append(integral_cost)\n\n for edge in self.gcs.Edges():\n if edge.u() == self.source:\n continue\n edge.AddCost(Binding[Cost](integral_cost, edge.xu()))\n else:\n q_ds = u_path_deriv.vector_values(s_points)\n for ii in range(integration_points + 1):\n costs = []\n for jj in range(self.dimension):\n if ii == 0 or ii == integration_points:\n costs.append(0.5 * 1./integration_points * q_ds[jj, ii])\n else:\n costs.append(1./integration_points * q_ds[jj, ii])\n H = DecomposeLinearExpressions(costs, self.u_vars)\n integral_cost = L2NormCost(np.matmul(weight_matrix, H), np.zeros(self.dimension))\n self.edge_costs.append(integral_cost)\n\n for edge in self.gcs.Edges():\n if edge.u() == self.source:\n continue\n edge.AddCost(Binding[Cost](integral_cost, edge.xu()))\n\n def addPathEnergyCost(self, weight):\n if isinstance(weight, float) or isinstance(weight, int):\n weight_matrix = weight * np.eye(self.dimension)\n else:\n assert(len(weight) == self.dimension)\n weight_matrix = np.diag(weight)\n\n u_path_control = self.u_r_trajectory.MakeDerivative(1).control_points()\n u_time_control = self.u_h_trajectory.MakeDerivative(1).control_points()\n for ii in range(len(u_path_control)):\n A_ctrl = DecomposeLinearExpressions(u_path_control[ii], self.u_vars)\n b_ctrl = DecomposeLinearExpressions(u_time_control[ii], self.u_vars)\n H = np.vstack(((self.order) * b_ctrl, np.matmul(np.sqrt(weight_matrix), A_ctrl)))\n energy_cost = PerspectiveQuadraticCost(H, np.zeros(H.shape[0]))\n self.edge_costs.append(energy_cost)\n\n for edge in self.gcs.Edges():\n if edge.u() == self.source:\n continue\n edge.AddCost(Binding[Cost](energy_cost, edge.xu()))\n\n def addDerivativeRegularization(self, weight_r, weight_h, order):\n\n assert isinstance(order, int) and 2 <= order <= self.order\n weights = [weight_r, weight_h]\n for weight in weights:\n assert isinstance(weight, float) or isinstance(weight, int)\n\n trajectories = [self.u_r_trajectory, 
self.u_h_trajectory]\n for traj, weight in zip(trajectories, weights):\n derivative_control = traj.MakeDerivative(order).control_points()\n for c in derivative_control:\n A_ctrl = DecomposeLinearExpressions(c, self.u_vars)\n H = A_ctrl.T.dot(A_ctrl) * 2 * weight / (1 + self.order - order)\n reg_cost = QuadraticCost(H, np.zeros(H.shape[0]), 0)\n self.edge_costs.append(reg_cost)\n\n for edge in self.gcs.Edges():\n if edge.u() == self.source:\n continue\n edge.AddCost(Binding[Cost](reg_cost, edge.xu()))\n\n def addVelocityLimits(self, lower_bound, upper_bound):\n assert len(lower_bound) == self.dimension\n assert len(upper_bound) == self.dimension\n\n u_path_control = self.u_r_trajectory.MakeDerivative(1).control_points()\n u_time_control = self.u_h_trajectory.MakeDerivative(1).control_points()\n lb = np.expand_dims(lower_bound, 1)\n ub = np.expand_dims(upper_bound, 1)\n\n for ii in range(len(u_path_control)):\n A_ctrl = DecomposeLinearExpressions(u_path_control[ii], self.u_vars)\n b_ctrl = DecomposeLinearExpressions(u_time_control[ii], self.u_vars)\n A_constraint = np.vstack((A_ctrl - ub * b_ctrl, -A_ctrl + lb * b_ctrl))\n velocity_con = LinearConstraint(\n A_constraint, -np.inf*np.ones(2*self.dimension), np.zeros(2*self.dimension))\n self.deriv_constraints.append(velocity_con)\n\n for edge in self.gcs.Edges():\n if edge.u() == self.source:\n continue\n edge.AddConstraint(Binding[Constraint](velocity_con, edge.xu()))\n\n def addSourceTarget(self, source, target, edges=None, velocity=None, zero_deriv_boundary=None):\n source_edges, target_edges = super().addSourceTarget(source, target, edges)\n\n if velocity is not None:\n assert velocity.shape == (2, self.dimension)\n\n u_path_control = self.u_r_trajectory.MakeDerivative(1).control_points()\n u_time_control = self.u_h_trajectory.MakeDerivative(1).control_points()\n initial_velocity_error = np.squeeze(u_path_control[0]) - velocity[0] * np.squeeze(u_time_control[0])\n final_velocity_error = np.squeeze(u_path_control[-1]) - velocity[1] * np.squeeze(u_time_control[-1])\n initial_velocity_con = LinearEqualityConstraint(\n DecomposeLinearExpressions(initial_velocity_error, self.u_vars),\n np.zeros(self.dimension))\n final_velocity_con = LinearEqualityConstraint(\n DecomposeLinearExpressions(final_velocity_error, self.u_vars),\n np.zeros(self.dimension))\n\n if zero_deriv_boundary is not None:\n assert self.order > zero_deriv_boundary + 1\n initial_constraints = []\n final_constraints = []\n\n for deriv in range(1, zero_deriv_boundary+1):\n u_path_control = self.u_r_trajectory.MakeDerivative(deriv).control_points()\n initial_constraints.append(LinearEqualityConstraint(\n DecomposeLinearExpressions(np.squeeze(u_path_control[0]), self.u_vars),\n np.zeros(self.dimension)))\n final_constraints.append(LinearEqualityConstraint(\n DecomposeLinearExpressions(np.squeeze(u_path_control[-1]), self.u_vars),\n np.zeros(self.dimension)))\n\n for edge in source_edges:\n for jj in range(self.dimension):\n edge.AddConstraint(edge.xu()[jj] == edge.xv()[jj])\n\n if velocity is not None:\n edge.AddConstraint(Binding[Constraint](initial_velocity_con, edge.xv()))\n if zero_deriv_boundary is not None:\n for i_con in initial_constraints:\n edge.AddConstraint(Binding[Constraint](i_con, edge.xv()))\n\n edge.AddConstraint(edge.xv()[-(self.order + 1)] == 0.)\n\n for edge in target_edges: \n for jj in range(self.dimension):\n edge.AddConstraint(\n edge.xu()[-(self.dimension + self.order + 1) + jj] == edge.xv()[jj])\n\n if velocity is not None:\n 
edge.AddConstraint(Binding[Constraint](final_velocity_con, edge.xu()))\n if zero_deriv_boundary is not None:\n for f_con in final_constraints:\n edge.AddConstraint(Binding[Constraint](f_con, edge.xu()))\n\n for cost in self.edge_costs:\n edge.AddCost(Binding[Cost](cost, edge.xu()))\n\n for d_con in self.deriv_constraints:\n edge.AddConstraint(Binding[Constraint](d_con, edge.xu()))\n\n\n def SolvePath(self, rounding=False, verbose=False, preprocessing=False):\n best_path, best_result, results_dict = self.solveGCS(\n rounding, preprocessing, verbose)\n\n if best_path is None:\n return None, results_dict\n\n # Extract trajectory control points\n knots = np.zeros(self.order + 1)\n path_control_points = []\n time_control_points = []\n for edge in best_path:\n if edge.v() == self.target:\n knots = np.concatenate((knots, [knots[-1]]))\n path_control_points.append(best_result.GetSolution(edge.xv()))\n time_control_points.append(np.array([best_result.GetSolution(edge.xu())[-1]]))\n break\n edge_time = knots[-1] + 1.\n knots = np.concatenate((knots, np.full(self.order, edge_time)))\n edge_path_points = np.reshape(best_result.GetSolution(edge.xv())[:-(self.order + 1)],\n (self.dimension, self.order + 1), \"F\")\n edge_time_points = best_result.GetSolution(edge.xv())[-(self.order + 1):]\n for ii in range(self.order):\n path_control_points.append(edge_path_points[:, ii])\n time_control_points.append(np.array([edge_time_points[ii]]))\n\n offset = time_control_points[0].copy()\n for ii in range(len(time_control_points)):\n time_control_points[ii] -= offset\n\n path_control_points = np.array(path_control_points).T\n time_control_points = np.array(time_control_points).T\n\n path = BsplineTrajectory(BsplineBasis(self.order + 1, knots), path_control_points)\n time_traj = BsplineTrajectory(BsplineBasis(self.order + 1, knots), time_control_points)\n\n return BezierTrajectory(path, time_traj), results_dict" }, { "identifier": "LinearGCS", "path": "gcs/linear.py", "snippet": "class LinearGCS(BaseGCS):\n def __init__(self, regions, edges=None, path_weights=None, full_dim_overlap=False):\n BaseGCS.__init__(self, regions)\n\n if path_weights is None:\n path_weights = np.ones(self.dimension)\n elif isinstance(path_weights, float) or isinstance(path_weights, int):\n path_weights = path_weights * np.ones(self.dimension)\n assert len(path_weights) == self.dimension\n\n self.edge_cost = L2NormCost(\n np.hstack((np.diag(-path_weights), np.diag(path_weights))),\n np.zeros(self.dimension))\n\n for i, r in enumerate(self.regions):\n self.gcs.AddVertex(r, name = self.names[i] if not self.names is None else '')\n\n if edges is None:\n if full_dim_overlap:\n edges = self.findEdgesViaFullDimensionOverlaps()\n else:\n edges = self.findEdgesViaOverlaps()\n\n vertices = self.gcs.Vertices()\n for ii, jj in edges:\n u = vertices[ii]\n v = vertices[jj]\n edge = self.gcs.AddEdge(u, v, f\"({u.name()}, {v.name()})\")\n\n edge_length = edge.AddCost(Binding[Cost](\n self.edge_cost, np.append(u.x(), v.x())))[1]\n\n # Constrain point in v to be in u\n edge.AddConstraint(Binding[Constraint](\n LinearConstraint(u.set().A(),\n -np.inf*np.ones(len(u.set().b())),\n u.set().b()),\n v.x()))\n\n def addSourceTarget(self, source, target, edges=None):\n source_edges, target_edges = super().addSourceTarget(source, target, edges)\n\n for edge in source_edges:\n for jj in range(self.dimension):\n edge.AddConstraint(edge.xu()[jj] == edge.xv()[jj])\n\n for edge in target_edges:\n edge.AddCost(Binding[Cost](\n self.edge_cost, np.append(edge.xu(), 
edge.xv())))\n\n\n def SolvePath(self, rounding=False, verbose=False, preprocessing=False):\n best_path, best_result, results_dict = self.solveGCS(\n rounding, preprocessing, verbose)\n\n if best_path is None:\n return None, results_dict\n\n # Extract trajectory\n waypoints = np.empty((self.dimension, 0))\n for edge in best_path:\n new_waypoint = best_result.GetSolution(edge.xv())\n waypoints = np.concatenate(\n [waypoints, np.expand_dims(new_waypoint, 1)], axis=1)\n\n return waypoints, results_dict" }, { "identifier": "set_transparency_of_models", "path": "reproduction/prm_comparison/helpers.py", "snippet": "def set_transparency_of_models(plant, model_instances, alpha, scene_graph):\n \"\"\"Sets the transparency of the given models.\"\"\"\n inspector = scene_graph.model_inspector()\n for model in model_instances:\n for body_id in plant.GetBodyIndices(model):\n frame_id = plant.GetBodyFrameIdOrThrow(body_id)\n for geometry_id in inspector.GetGeometries(frame_id,\n Role.kIllustration):\n properties = inspector.GetIllustrationProperties(geometry_id)\n phong = properties.GetProperty(\"phong\", \"diffuse\")\n phong.set(phong.r(), phong.g(), phong.b(), alpha)\n properties.UpdateProperty(\"phong\", \"diffuse\", phong)\n scene_graph.AssignRole(plant.get_source_id(), geometry_id,\n properties, RoleAssign.kReplace)" } ]
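In the LinearGCS snippet above, the per-edge cost is an L2NormCost built from the stacked matrix [-diag(w) | diag(w)] applied to the concatenated vertex variables. The numpy check below is a standalone sketch with arbitrary numbers (independent of Drake and of this dataset row); it confirms that this cost equals the weighted distance between consecutive waypoints, which is what the path_weights argument is meant to control.

import numpy as np

dim = 3
w = np.array([1.0, 2.0, 0.5])             # example path_weights (illustrative values)
A = np.hstack((np.diag(-w), np.diag(w)))  # matrix handed to L2NormCost in the snippet above

rng = np.random.default_rng(0)
xu, xv = rng.random(dim), rng.random(dim)  # stand-ins for the points in vertices u and v

edge_cost = np.linalg.norm(A @ np.concatenate((xu, xv)))
weighted_dist = np.linalg.norm(w * (xv - xu))
assert np.isclose(edge_cost, weighted_dist)
print(edge_cost)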
import numpy as np import os import time from copy import copy from pydrake.common import FindResourceOrThrow from pydrake.geometry import ( CollisionFilterDeclaration, GeometrySet, MeshcatVisualizer, Rgba, Role, SceneGraph ) from pydrake.math import RigidTransform, RollPitchYaw, RotationMatrix from pydrake.multibody.inverse_kinematics import InverseKinematics from pydrake.multibody.parsing import LoadModelDirectives, Parser, ProcessModelDirectives from pydrake.multibody.plant import AddMultibodyPlantSceneGraph, MultibodyPlant from pydrake.perception import PointCloud from pydrake.solvers import MosekSolver, Solve from pydrake.systems.analysis import Simulator from pydrake.systems.framework import DiagramBuilder, LeafSystem from pydrake.systems.primitives import TrajectorySource from pydrake.systems.rendering import MultibodyPositionToGeometryPose from gcs.bezier import BezierGCS from gcs.linear import LinearGCS from gcs.rounding import * from reproduction.prm_comparison.helpers import set_transparency_of_models from reproduction.util import *
12,267
elif "shelves::" in gid_name: shelf.append(gid) elif "binR" in gid_name: bins[0].append(gid) elif "binL" in gid_name: bins[1].append(gid) elif "table" in gid_name: table.append(gid) else: print("Geometry", gid_name, "not assigned to an object.") filter_manager.Apply(CollisionFilterDeclaration().ExcludeWithin( GeometrySet(iiwa1[0] + iiwa1[1] + iiwa1[2] + iiwa1[3] + shelf))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[1] + iiwa1[2]+ iiwa1[3]), GeometrySet(iiwa1[4] + iiwa1[5]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[3] + iiwa1[4]), GeometrySet(iiwa1[6]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[2] + iiwa1[3] + iiwa1[4] + iiwa1[5] + iiwa1[6]), GeometrySet(iiwa1[7] + wsg1))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[0] + iiwa1[0] + iiwa1[2]), GeometrySet(bins[0]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[0] + iiwa1[1] + iiwa1[2] + iiwa1[3] + iiwa1[4]), GeometrySet(bins[1]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[0] + iiwa1[0] + iiwa1[2]), GeometrySet(table))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeWithin( GeometrySet(iiwa2[0] + iiwa2[1] + iiwa2[2] + iiwa2[3] + shelf))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[1] + iiwa2[2]+ iiwa2[3]), GeometrySet(iiwa2[4] + iiwa2[5]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[3] + iiwa2[4]), GeometrySet(iiwa2[6]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[2] + iiwa2[3] + iiwa2[4] + iiwa2[5] + iiwa2[6]), GeometrySet(iiwa2[7] + wsg2))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[0] + iiwa2[0] + iiwa2[2]), GeometrySet(bins[1]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[0] + iiwa2[1] + iiwa2[2] + iiwa2[3] + iiwa2[4]), GeometrySet(bins[0]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[0] + iiwa2[0] + iiwa2[2]), GeometrySet(table))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[0] + iiwa1[1]), GeometrySet(iiwa2[0] + iiwa2[1]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[2]), GeometrySet(iiwa2[0] + iiwa2[1]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[0] + iiwa1[1]), GeometrySet(iiwa2[2]))) pairs = scene_graph.get_query_output_port().Eval(context).inspector().GetCollisionCandidates() print("Filtered collision pairs from", len(inspector.GetCollisionCandidates()), "to", len(pairs)) # initial_guess = np.concatenate((q0, q0)) # min_dist = (0.01, 0.01)??? 
def runBimanualIK(plant, context, wsg1_id, wsg2_id, wsg1_pose, wsg2_pose, initial_guess, min_dist=None): hand_frame1 = plant.GetBodyByName("body", wsg1_id).body_frame() hand_frame2 = plant.GetBodyByName("body", wsg2_id).body_frame() ik = InverseKinematics(plant, context) if min_dist is not None: ik.AddMinimumDistanceConstraint(*min_dist) ik.prog().AddBoundingBoxConstraint(plant.GetPositionLowerLimits(), plant.GetPositionUpperLimits(), ik.q()) ik.prog().SetInitialGuess(ik.q(), initial_guess) ik.prog().AddQuadraticCost((ik.q() - initial_guess).dot(ik.q() - initial_guess)) ik.AddPositionConstraint(hand_frame1, [0, 0, 0], plant.world_frame(), wsg1_pose.translation(), wsg1_pose.translation()) ik.AddOrientationConstraint(hand_frame1, RotationMatrix(), plant.world_frame(), wsg1_pose.rotation(), 0.001) ik.AddPositionConstraint(hand_frame2, [0, 0, 0], plant.world_frame(), wsg2_pose.translation(), wsg2_pose.translation()) ik.AddOrientationConstraint(hand_frame2, RotationMatrix(), plant.world_frame(), wsg2_pose.rotation(), 0.001) result = Solve(ik.prog()) return result.GetSolution(ik.q()) def visualizeConfig(diagram, plant, context, q): plant_context = plant.GetMyMutableContextFromRoot(context) plant.SetPositions(plant_context, q) diagram.ForcedPublish(context) def getLinearGcsPath(regions, sequence): path = [sequence[0]] run_time = 0.0 gcs = LinearGCS(regions) gcs.setPaperSolverOptions() gcs.setSolver(MosekSolver()) for start_pt, goal_pt in zip(sequence[:-1], sequence[1:]): gcs.addSourceTarget(start_pt, goal_pt) start_time = time.time() waypoints, results_dict = gcs.SolvePath(True, False, preprocessing=True) if waypoints is None: print(f"Failed between {start_pt} and {goal_pt}") return None print(f"Planned segment in {np.round(time.time() - start_time, 4)}", flush=True) # run_time += results_dict["preprocessing_stats"]['linear_programs'] run_time += results_dict["relaxation_solver_time"] run_time += results_dict["total_rounded_solver_time"] path += waypoints.T[1:].tolist() gcs.ResetGraph() return np.stack(path).T, run_time def getBezierGcsPath(plant, regions, sequence, order, continuity, hdot_min = 1e-3): run_time = [] trajectories = []
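Aside (editorial note, not a field of this record): the grasp targets used as IK seeds throughout the file above are all built from the same RigidTransform(RollPitchYaw(...), [x, y, z]) pattern. The short, runnable sketch below just inspects one of those poses; it assumes only that pydrake is installed, and the name X_WG is my own.

import numpy as np
from pydrake.math import RigidTransform, RollPitchYaw

# The "top_shelf" seed pose used repeatedly above: roll just short of -pi,
# zero pitch, yaw pi/2, placed 0.7 m forward, 0.15 m sideways, 0.9 m up.
X_WG = RigidTransform(RollPitchYaw(-np.pi + 0.1, 0, np.pi / 2), [0.7, 0.15, 0.9])
print(np.round(X_WG.rotation().matrix(), 3))  # 3x3 rotation matrix of the gripper target
print(X_WG.translation())                     # [0.7, 0.15, 0.9]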
def getIkSeeds(): return { "top_shelf/top_shelf": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.9]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.9])), "top_shelf/shelf_1": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.9]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.65])), "top_shelf/shelf_2": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.9]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.4])), "top_shelf/bin_L": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.9]), RigidTransform(RollPitchYaw(-np.pi/2+0.1, 0, np.pi), [0., 1.1, 0.3])), "shelf_1/top_shelf": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.65]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.9])), "shelf_1/shelf_1": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.65]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.65])), "shelf_1/shelf_2": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.65]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.4])), "shelf_1/bin_L": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.65]), RigidTransform(RollPitchYaw(-np.pi/2+0.1, 0, np.pi), [0., 1.1, 0.3])), "shelf_2/top_shelf": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.4]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.9])), "shelf_2/shelf_1": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.4]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.65])), "shelf_2/shelf_2": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.4]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.4])), "shelf_2/bin_L": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.4]), RigidTransform(RollPitchYaw(-np.pi/2+0.1, 0, np.pi), [0., 1.1, 0.3])), "bin_R/top_shelf": (RigidTransform(RollPitchYaw(-np.pi/2+0.1, 0, -np.pi), [0.0, -0.6, 0.3]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.9])), "bin_R/shelf_1": (RigidTransform(RollPitchYaw(-np.pi/2+0.1, 0, -np.pi), [0.0, -0.6, 0.3]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.65])), "bin_R/shelf_2": (RigidTransform(RollPitchYaw(-np.pi/2+0.1, 0, -np.pi), [0.0, -0.6, 0.3]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.4])), "bin_R/bin_L": (RigidTransform(RollPitchYaw(-np.pi/2+0.1, 0, -np.pi), [0.0, -0.6, 0.3]), RigidTransform(RollPitchYaw(-np.pi/2+0.1, 0, np.pi), [0., 1.1, 0.3])), "top_shelf/shelf_1_extract": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.9]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.5, 0.35, 0.65])), "top_shelf/shelf_2_extract": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.9]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.5, 0.35, 0.4])), "shelf_2_extract/top_shelf": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.5, 0.15, 0.4]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.9])), "shelf_1_extract/top_shelf": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.5, 0.15, 0.65]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.9])), "top_shelf/shelf_1_cross": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.9]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2-0.3), [0.7, 0.15, 0.65])), 
"cross_table/top_shelf_cross": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi), [0.4, 0.4, 0.2]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.9])), "shelf_2_cross/top_shelf_cross": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2+0.4), [0.7, 0.35, 0.4]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2-0.4), [0.7, 0.15, 0.9])), } def getConfigurationSeeds(): return { "top_shelf/top_shelf": [0.37080011, 0.41394084, -0.16861973, -0.70789778, -0.37031516, 0.60412162, 0.39982981, -0.37080019, 0.41394089, 0.16861988, -0.70789766, 0.37031506, 0.60412179, -0.39982996], "top_shelf/shelf_1": [0.37080079, 0.41394132, -0.16862043, -0.70789679, -0.37031656, 0.60412327, 0.39982969, -0.93496924, 0.46342534, 0.92801666, -1.45777635, -0.31061724, -0.0657716, -0.06019899], "top_shelf/shelf_2": [0.37086448, 0.41394538, -0.16875166, -0.70789745, -0.37020563, 0.60411217, 0.399785, -0.4416204 , 0.62965228, 0.20598405, -1.73324339, -0.41354372, -0.68738414, 0.17443976], "top_shelf/bin_L": [0.37081989, 0.41394235, -0.16866012, -0.70789737, -0.37028201, 0.60411923, 0.39981634, -0.89837331, -1.1576151 , 1.75505216, -1.37515153, 1.0676443 , 1.56371166, -0.64126346], "shelf_1/top_shelf": [0.93496924, 0.46342534, -0.92801666, -1.45777635, 0.31061724, -0.0657716 , 0.06019899, -0.37080079, 0.41394132, 0.16862043, -0.70789679, 0.37031656, 0.60412327, -0.39982969], "shelf_1/shelf_1": [0.87224109, 0.43096634, -0.82223436, -1.45840049, 0.73813452, -0.08999384, -0.41624203, -0.87556489, 0.43246906, 0.82766047, -1.45838515, -0.72259842, -0.0884963, 0.39840129], "shelf_1/shelf_2": [0.93496866, 0.463425 , -0.92801564, -1.45777634, 0.3106235, -0.06577172, 0.06019173, -0.44158858, 0.62964838, 0.20594112, -1.73324341, -0.41354987, -0.6873923 , 0.17446778], "shelf_1/bin_L": [0.93496918, 0.46342531, -0.92801656, -1.45777637, 0.31061728, -0.06577167, 0.06019927, -0.89837321, -1.15761746, 1.75504915, -1.37515113, 1.06764716, 1.56371454, -0.64126383], "shelf_2/top_shelf": [0.4416204, 0.62965228, -0.20598405, -1.73324339, 0.41354372, -0.68738414, -0.17443976, -0.37086448, 0.41394538, 0.16875166, -0.70789745, 0.37020563, 0.60411217, -0.399785], "shelf_2/shelf_1": [0.44158858, 0.62964838, -0.20594112, -1.73324341, 0.41354987, -0.6873923, -0.17446778, -0.93496866, 0.463425 , 0.92801564, -1.45777634, -0.3106235 , -0.06577172, -0.06019173], "shelf_2/shelf_2": [0.44161313, 0.62965141, -0.20597435, -1.73324346, 0.41354447, -0.68738613, -0.17444557, -0.4416132 , 0.62965142, 0.20597452, -1.73324348, -0.41354416, -0.68738609, 0.17444625], "shelf_2/bin_L": [0.44161528, 0.62965169, -0.20597726, -1.73324347, 0.41354399, -0.68738565, -0.17444283, -1.37292761, -0.68372976, 2.96705973, -1.41521783, 2.96705973, -1.11343251, -3.0140737 ], "bin_R/top_shelf": [0.81207926, -1.25359738, -1.58098625, -1.5155474 , -1.32223687, 1.50549708, -2.38221725, -0.37085114, 0.4139444 , 0.16872443, -0.70789757, 0.37022786, 0.60411401, -0.39979449], "bin_R/shelf_1": [0.81207923, -1.25358454, -1.58100042, -1.51554769, -1.32222337, 1.50548369, -2.3822204 , -0.9349716 , 0.46342674, 0.92802082, -1.45777624, -0.31059455, -0.0657707 , -0.06022391], "bin_R/shelf_2": [0.81207937, -1.25360462, -1.58097816, -1.51554761, -1.32224557, 1.50550485, -2.38221483, -0.44166552, 0.62965782, 0.20604497, -1.7332434 , -0.41353464, -0.6873727 , 0.17439863], "bin_R/bin_L": [-1.73637519, 0.6209681 , 0.24232887, -1.51538355, -0.17977474, 0.92618894, -3.01360257, 1.31861497, 0.72394333, 0.4044295 , -1.37509496, -0.27461997, 1.20038493, 0.18611701], "neutral/neutral": 
[0.0, -0.2, 0, -1.2, 0, 1.6, 0.0, 0.0, -0.2, 0, -1.2, 0, 1.6, 0.0], "neutral/shelf_1": [0.0, -0.2, 0, -1.2, 0, 1.6, 0.0, -0.93496866, 0.463425 , 0.92801564, -1.45777634, -0.3106235 , -0.06577172, -0.06019173], "neutral/shelf_2": [0.0, -0.2, 0, -1.2, 0, 1.6, 0.0, -0.44166552, 0.62965782, 0.20604497, -1.7332434 , -0.41353464, -0.6873727 , 0.17439863], "shelf_1/neutral": [0.93496924, 0.46342534, -0.92801666, -1.45777635, 0.31061724, -0.0657716 , 0.06019899, 0.0, -0.2, 0, -1.2, 0, 1.6, 0.0], "shelf_2/neutral": [0.44161528, 0.62965169, -0.20597726, -1.73324347, 0.41354399, -0.68738565, -0.17444283, 0.0, -0.2, 0, -1.2, 0, 1.6, 0.0], "shelf_2_cross/top_shelf_cross": [0.47500706, 0.72909874, 0.01397772, -1.52841372, 0.15392366, -0.591641, -0.12870521, -0.48821156, 0.67762534, 0.02049926, -0.27420758, 0.10620709, 0.72215209, -0.09973172], } # Additional seed points not needed to connect the graph # "neutral/shelf_1_extract": [ 0.0, -0.2, 0, -1.2, 0, 1.6, 0.0, -0.35486829, -0.10621117, -0.09276445, -1.94995786, 1.88826556, 0.46922151, -1.98267349], # "neutral/shelf_2_extract": [ 0.0, -0.2, 0, -1.2, 0, 1.6, 0.0, 0.3078069 , 0.56765359, -0.86829439, -2.0943951 , 2.53950045, 1.09607546, -2.4169564], # "shelf_1_extract/neutral": [-1.05527083, -0.43710629, 1.15648812, -1.95011062, 0.24422131, -0.07820216, 0.15872416, 0.0, -0.2, 0, -1.2, 0, 1.6, 0.0], # "shelf_2_extract/neutral": [-0.30739053, 0.5673891 , 0.86772198, -2.0943951 , -2.53946773, 1.09586777, 2.41729532, 0.0, -0.2, 0, -1.2, 0, 1.6, 0.0], # "cross_table/top_shelf_cross": [ 0.04655887, 0.97997658, 0.52004246, -1.91926412, -1.37518707, -0.88823968, 0.07674699, -0.5921624 , 0.83651867, 0.20513136, -0.00257881, 0.51748756, 0.92012332, -0.51686487], def getDemoConfigurations(): return [ [0.0, -0.2, 0, -1.2, 0, 1.6, 0.0, 0.0, -0.2, 0, -1.2, 0, 1.6, 0.0], [0.69312848, 0.36303784, -0.66625368, -1.49515991, 0.3230085, -0.10942887, -0.09496304, -0.69312891, 0.36303794, 0.66625426, -1.49515975, -0.32300928, -0.10942832, 0.0949629], [0.2014604, 0.66463495, 0.16799372, -1.66212763, -0.09131682, -0.64368844, -0.03645568, -0.38777291, 0.56141139, -0.05760515, -0.47447495, 0.06515541, 0.63627899, -0.02552148], [-1.8487163 , 0.71749397, 0.66464618, -1.4912954 , -0.52882233, 1.0096015 , -2.62844995, 1.43620829, 0.70451542, -0.01532988, -1.34999693, -0.00550105, 1.18684923, -0.14400234], ] def generateDemoConfigurations(plant, context, wsg1_id, wsg2_id): demo_q = [[0.0, -0.2, 0, -1.2, 0, 1.6, 0.0, 0.0, -0.2, 0, -1.2, 0, 1.6, 0.0]] initial_guess = copy(demo_q[0]) demo_q.append(runBimanualIK( plant, context, wsg1_id, wsg2_id, RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.10, 0.65]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.40, 0.65]), initial_guess, (0.01, 0.01))) demo_q.append(runBimanualIK( plant, context, wsg1_id, wsg2_id, RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2+0.4), [0.7, 0.25, 0.4]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2-0.4), [0.7, 0.20, 0.9]), initial_guess, None)) initial_guess[0] = -np.pi/2 initial_guess[7] = np.pi/2 demo_q.append(runBimanualIK( plant, context, wsg1_id, wsg2_id, RigidTransform(RollPitchYaw(-np.pi/2+0.1, 0, -np.pi), [0.09, -0.6, 0.3]), RigidTransform(RollPitchYaw(-np.pi/2+0.1, 0, np.pi), [0.09, 1.1, 0.3]), initial_guess, None)) return demo_q def filterCollsionGeometry(scene_graph, context): filter_manager = scene_graph.collision_filter_manager(context) inspector = scene_graph.model_inspector() iiwa1 = [[], [], [], [], [], [], [], []] iiwa2 = [[], [], [], [], [], [], [], []] wsg1 = 
[] wsg2 = [] shelf = [] bins = [[], []] table = [] for gid in inspector.GetGeometryIds( GeometrySet(inspector.GetAllGeometryIds()), Role.kProximity): gid_name = inspector.GetName(inspector.GetFrameId(gid)) if "iiwa_1::iiwa_link_" in gid_name: link_num = gid_name[18] iiwa1[int(link_num)].append(gid) elif "iiwa_2::iiwa_link_" in gid_name: link_num = gid_name[18] iiwa2[int(link_num)].append(gid) elif "wsg_1" in gid_name: wsg1.append(gid) elif "wsg_2" in gid_name: wsg2.append(gid) elif "shelves::" in gid_name: shelf.append(gid) elif "binR" in gid_name: bins[0].append(gid) elif "binL" in gid_name: bins[1].append(gid) elif "table" in gid_name: table.append(gid) else: print("Geometry", gid_name, "not assigned to an object.") filter_manager.Apply(CollisionFilterDeclaration().ExcludeWithin( GeometrySet(iiwa1[0] + iiwa1[1] + iiwa1[2] + iiwa1[3] + shelf))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[1] + iiwa1[2]+ iiwa1[3]), GeometrySet(iiwa1[4] + iiwa1[5]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[3] + iiwa1[4]), GeometrySet(iiwa1[6]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[2] + iiwa1[3] + iiwa1[4] + iiwa1[5] + iiwa1[6]), GeometrySet(iiwa1[7] + wsg1))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[0] + iiwa1[0] + iiwa1[2]), GeometrySet(bins[0]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[0] + iiwa1[1] + iiwa1[2] + iiwa1[3] + iiwa1[4]), GeometrySet(bins[1]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[0] + iiwa1[0] + iiwa1[2]), GeometrySet(table))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeWithin( GeometrySet(iiwa2[0] + iiwa2[1] + iiwa2[2] + iiwa2[3] + shelf))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[1] + iiwa2[2]+ iiwa2[3]), GeometrySet(iiwa2[4] + iiwa2[5]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[3] + iiwa2[4]), GeometrySet(iiwa2[6]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[2] + iiwa2[3] + iiwa2[4] + iiwa2[5] + iiwa2[6]), GeometrySet(iiwa2[7] + wsg2))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[0] + iiwa2[0] + iiwa2[2]), GeometrySet(bins[1]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[0] + iiwa2[1] + iiwa2[2] + iiwa2[3] + iiwa2[4]), GeometrySet(bins[0]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[0] + iiwa2[0] + iiwa2[2]), GeometrySet(table))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[0] + iiwa1[1]), GeometrySet(iiwa2[0] + iiwa2[1]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[2]), GeometrySet(iiwa2[0] + iiwa2[1]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[0] + iiwa1[1]), GeometrySet(iiwa2[2]))) pairs = scene_graph.get_query_output_port().Eval(context).inspector().GetCollisionCandidates() print("Filtered collision pairs from", len(inspector.GetCollisionCandidates()), "to", len(pairs)) # initial_guess = np.concatenate((q0, q0)) # min_dist = (0.01, 0.01)??? 
def runBimanualIK(plant, context, wsg1_id, wsg2_id, wsg1_pose, wsg2_pose, initial_guess, min_dist=None): hand_frame1 = plant.GetBodyByName("body", wsg1_id).body_frame() hand_frame2 = plant.GetBodyByName("body", wsg2_id).body_frame() ik = InverseKinematics(plant, context) if min_dist is not None: ik.AddMinimumDistanceConstraint(*min_dist) ik.prog().AddBoundingBoxConstraint(plant.GetPositionLowerLimits(), plant.GetPositionUpperLimits(), ik.q()) ik.prog().SetInitialGuess(ik.q(), initial_guess) ik.prog().AddQuadraticCost((ik.q() - initial_guess).dot(ik.q() - initial_guess)) ik.AddPositionConstraint(hand_frame1, [0, 0, 0], plant.world_frame(), wsg1_pose.translation(), wsg1_pose.translation()) ik.AddOrientationConstraint(hand_frame1, RotationMatrix(), plant.world_frame(), wsg1_pose.rotation(), 0.001) ik.AddPositionConstraint(hand_frame2, [0, 0, 0], plant.world_frame(), wsg2_pose.translation(), wsg2_pose.translation()) ik.AddOrientationConstraint(hand_frame2, RotationMatrix(), plant.world_frame(), wsg2_pose.rotation(), 0.001) result = Solve(ik.prog()) return result.GetSolution(ik.q()) def visualizeConfig(diagram, plant, context, q): plant_context = plant.GetMyMutableContextFromRoot(context) plant.SetPositions(plant_context, q) diagram.ForcedPublish(context) def getLinearGcsPath(regions, sequence): path = [sequence[0]] run_time = 0.0 gcs = LinearGCS(regions) gcs.setPaperSolverOptions() gcs.setSolver(MosekSolver()) for start_pt, goal_pt in zip(sequence[:-1], sequence[1:]): gcs.addSourceTarget(start_pt, goal_pt) start_time = time.time() waypoints, results_dict = gcs.SolvePath(True, False, preprocessing=True) if waypoints is None: print(f"Failed between {start_pt} and {goal_pt}") return None print(f"Planned segment in {np.round(time.time() - start_time, 4)}", flush=True) # run_time += results_dict["preprocessing_stats"]['linear_programs'] run_time += results_dict["relaxation_solver_time"] run_time += results_dict["total_rounded_solver_time"] path += waypoints.T[1:].tolist() gcs.ResetGraph() return np.stack(path).T, run_time def getBezierGcsPath(plant, regions, sequence, order, continuity, hdot_min = 1e-3): run_time = [] trajectories = []
gcs = BezierGCS(regions, order, continuity)
0
2023-10-13 00:27:32+00:00
16k
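Note on the row layout (an editorial aside): each record pairs the truncated source file (import_statement followed by cropped_code) with the single ground-truth continuation in next_line, while gold_snippet_index marks which context snippet is needed to predict it. A minimal sketch of turning one row into a prompt/target pair follows; the toy field values and the concatenation format are assumptions, not something specified by the dump.

# Sketch only: toy stand-ins for the real fields of a row like the one above.
row = {
    "import_statement": "from gcs.bezier import BezierGCS",
    "cropped_code": "def getBezierGcsPath(plant, regions, sequence, order, continuity, hdot_min=1e-3):\n    run_time = []\n    trajectories = []\n",
    "next_line": "gcs = BezierGCS(regions, order, continuity)",
    "gold_snippet_index": 0,
}
prompt = row["import_statement"] + "\n\n" + row["cropped_code"]   # model input
target = row["next_line"]                                         # expected completion
print(repr(prompt.splitlines()[-1]), "->", target)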
LeapLabTHU/Rank-DETR
projects/dab_deformable_detr/configs/models/dab_deformable_detr_r50.py
[ { "identifier": "HungarianMatcher", "path": "detrex/modeling/matcher/matcher.py", "snippet": "class HungarianMatcher(nn.Module):\n \"\"\"HungarianMatcher which computes an assignment between targets and predictions.\n\n For efficiency reasons, the targets don't include the no_object. Because of this, in general,\n there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,\n while the others are un-matched (and thus treated as non-objects).\n\n Args:\n cost_class (float): The relative weight of the classification error\n in the matching cost. Default: 1.\n cost_bbox (float): The relative weight of the L1 error of the bounding box\n coordinates in the matching cost. Default: 1.\n cost_giou (float): This is the relative weight of the giou loss of\n the bounding box in the matching cost. Default: 1.\n cost_class_type (str): How the classification error is calculated.\n Choose from ``[\"ce_cost\", \"focal_loss_cost\"]``. Default: \"focal_loss_cost\".\n alpha (float): Weighting factor in range (0, 1) to balance positive vs\n negative examples in focal loss. Default: 0.25.\n gamma (float): Exponent of modulating factor (1 - p_t) to balance easy vs\n hard examples in focal loss. Default: 2.\n \"\"\"\n\n def __init__(\n self,\n cost_class: float = 1,\n cost_bbox: float = 1,\n cost_giou: float = 1,\n cost_class_type: str = \"focal_loss_cost\",\n alpha: float = 0.25,\n gamma: float = 2.0,\n ):\n super().__init__()\n self.cost_class = cost_class\n self.cost_bbox = cost_bbox\n self.cost_giou = cost_giou\n self.cost_class_type = cost_class_type\n self.alpha = alpha\n self.gamma = gamma\n assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, \"all costs cant be 0\"\n assert cost_class_type in {\n \"ce_cost\",\n \"focal_loss_cost\",\n }, \"only support ce loss or focal loss for computing class cost\"\n\n @torch.no_grad()\n def forward(self, outputs, targets):\n \"\"\"Forward function for `HungarianMatcher` which performs the matching.\n\n Args:\n outputs (Dict[str, torch.Tensor]): This is a dict that contains at least these entries:\n\n - ``\"pred_logits\"``: Tensor of shape (bs, num_queries, num_classes) with the classification logits.\n - ``\"pred_boxes\"``: Tensor of shape (bs, num_queries, 4) with the predicted box coordinates.\n\n targets (List[Dict[str, torch.Tensor]]): This is a list of targets (len(targets) = batch_size),\n where each target is a dict containing:\n\n - ``\"labels\"``: Tensor of shape (num_target_boxes, ) (where num_target_boxes is the number of ground-truth objects in the target) containing the class labels. 
# noqa\n - ``\"boxes\"``: Tensor of shape (num_target_boxes, 4) containing the target box coordinates.\n\n Returns:\n list[torch.Tensor]: A list of size batch_size, containing tuples of `(index_i, index_j)` where:\n\n - ``index_i`` is the indices of the selected predictions (in order)\n - ``index_j`` is the indices of the corresponding selected targets (in order)\n\n For each batch element, it holds: `len(index_i) = len(index_j) = min(num_queries, num_target_boxes)`\n \"\"\"\n bs, num_queries = outputs[\"pred_logits\"].shape[:2]\n\n # We flatten to compute the cost matrices in a batch\n if self.cost_class_type == \"ce_cost\":\n out_prob = (\n outputs[\"pred_logits\"].flatten(0, 1).softmax(-1)\n ) # [batch_size * num_queries, num_classes]\n elif self.cost_class_type == \"focal_loss_cost\":\n out_prob = (\n outputs[\"pred_logits\"].flatten(0, 1).sigmoid()\n ) # [batch_size * num_queries, num_classes]\n\n out_bbox = outputs[\"pred_boxes\"].flatten(0, 1) # [batch_size * num_queries, 4]\n\n # Also concat the target labels and boxes\n tgt_ids = torch.cat([v[\"labels\"] for v in targets])\n tgt_bbox = torch.cat([v[\"boxes\"] for v in targets])\n\n # Compute the classification cost.\n if self.cost_class_type == \"ce_cost\":\n # Compute the classification cost. Contrary to the loss, we don't use the NLL,\n # but approximate it in 1 - proba[target class].\n # The 1 is a constant that doesn't change the matching, it can be ommitted.\n cost_class = -out_prob[:, tgt_ids]\n elif self.cost_class_type == \"focal_loss_cost\":\n alpha = self.alpha\n gamma = self.gamma\n neg_cost_class = (1 - alpha) * (out_prob**gamma) * (-(1 - out_prob + 1e-8).log())\n pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())\n cost_class = pos_cost_class[:, tgt_ids] - neg_cost_class[:, tgt_ids]\n\n # Compute the L1 cost between boxes\n cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)\n\n # Compute the giou cost betwen boxes\n cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox))\n\n # Final cost matrix\n C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou\n C = C.view(bs, num_queries, -1).cpu()\n\n sizes = [len(v[\"boxes\"]) for v in targets]\n indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]\n return [\n (torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64))\n for i, j in indices\n ]\n\n def __repr__(self, _repr_indent=4):\n head = \"Matcher \" + self.__class__.__name__\n body = [\n \"cost_class: {}\".format(self.cost_class),\n \"cost_bbox: {}\".format(self.cost_bbox),\n \"cost_giou: {}\".format(self.cost_giou),\n \"cost_class_type: {}\".format(self.cost_class_type),\n \"focal cost alpha: {}\".format(self.alpha),\n \"focal cost gamma: {}\".format(self.gamma),\n ]\n lines = [head] + [\" \" * _repr_indent + line for line in body]\n return \"\\n\".join(lines)" }, { "identifier": "ChannelMapper", "path": "detrex/modeling/neck/channel_mapper.py", "snippet": "class ChannelMapper(nn.Module):\n \"\"\"Channel Mapper for reduce/increase channels of backbone features. Modified\n from `mmdet <https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/necks/channel_mapper.py>`_.\n\n This is used to reduce/increase the channels of backbone features.\n\n Args:\n input_shape (Dict[str, ShapeSpec]): A dict which contains the backbone features meta infomation,\n e.g. 
``input_shape = {\"res5\": ShapeSpec(channels=2048)}``.\n in_features (List[str]): A list contains the keys which maps the features output from the backbone,\n e.g. ``in_features = [\"res\"]``.\n out_channels (int): Number of output channels for each scale.\n kernel_size (int, optional): Size of the convolving kernel for each scale.\n Default: 3.\n stride (int, optional): Stride of convolution for each scale. Default: 1.\n bias (bool, optional): If True, adds a learnable bias to the output of each scale.\n Default: True.\n groups (int, optional): Number of blocked connections from input channels to\n output channels for each scale. Default: 1.\n dilation (int, optional): Spacing between kernel elements for each scale.\n Default: 1.\n norm_layer (nn.Module, optional): The norm layer used for each scale. Default: None.\n activation (nn.Module, optional): The activation layer used for each scale. Default: None.\n num_outs (int, optional): Number of output feature maps. There will be ``extra_convs`` when\n ``num_outs`` is larger than the length of ``in_features``. Default: None.\n\n Examples:\n >>> import torch\n >>> import torch.nn as nn\n >>> from detrex.modeling import ChannelMapper\n >>> from detectron2.modeling import ShapeSpec\n >>> input_features = {\n ... \"p0\": torch.randn(1, 128, 128, 128),\n ... \"p1\": torch.randn(1, 256, 64, 64),\n ... \"p2\": torch.randn(1, 512, 32, 32),\n ... \"p3\": torch.randn(1, 1024, 16, 16),\n ... }\n >>> input_shapes = {\n ... \"p0\": ShapeSpec(channels=128),\n ... \"p1\": ShapeSpec(channels=256),\n ... \"p2\": ShapeSpec(channels=512),\n ... \"p3\": ShapeSpec(channels=1024),\n ... }\n >>> in_features = [\"p0\", \"p1\", \"p2\", \"p3\"]\n >>> neck = ChannelMapper(\n ... input_shapes=input_shapes,\n ... in_features=in_features,\n ... out_channels=256,\n ... norm_layer=nn.GroupNorm(num_groups=32, num_channels=256)\n >>> outputs = neck(input_features)\n >>> for i in range(len(outputs)):\n ... 
print(f\"output[{i}].shape = {outputs[i].shape}\")\n output[0].shape = torch.Size([1, 256, 128, 128])\n output[1].shape = torch.Size([1, 256, 64, 64])\n output[2].shape = torch.Size([1, 256, 32, 32])\n output[3].shape = torch.Size([1, 256, 16, 16])\n \"\"\"\n\n def __init__(\n self,\n input_shapes: Dict[str, ShapeSpec],\n in_features: List[str],\n out_channels: int,\n kernel_size: int = 3,\n stride: int = 1,\n bias: bool = True,\n groups: int = 1,\n dilation: int = 1,\n norm_layer: nn.Module = None,\n activation: nn.Module = None,\n num_outs: int = None,\n **kwargs,\n ):\n super(ChannelMapper, self).__init__()\n self.extra_convs = None\n\n in_channels_per_feature = [input_shapes[f].channels for f in in_features]\n\n if num_outs is None:\n num_outs = len(input_shapes)\n\n self.convs = nn.ModuleList()\n for in_channel in in_channels_per_feature:\n self.convs.append(\n ConvNormAct(\n in_channels=in_channel,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=(kernel_size - 1) // 2,\n bias=bias,\n groups=groups,\n dilation=dilation,\n norm_layer=copy.deepcopy(norm_layer),\n activation=copy.deepcopy(activation),\n )\n )\n\n if num_outs > len(in_channels_per_feature):\n self.extra_convs = nn.ModuleList()\n for i in range(len(in_channels_per_feature), num_outs):\n if i == len(in_channels_per_feature):\n in_channel = in_channels_per_feature[-1]\n else:\n in_channel = out_channels\n self.extra_convs.append(\n ConvNormAct(\n in_channels=in_channel,\n out_channels=out_channels,\n kernel_size=3,\n stride=2,\n padding=1,\n bias=bias,\n groups=groups,\n dilation=dilation,\n norm_layer=copy.deepcopy(norm_layer),\n activation=copy.deepcopy(activation),\n )\n )\n\n self.input_shapes = input_shapes\n self.in_features = in_features\n self.out_channels = out_channels\n\n def forward(self, inputs):\n \"\"\"Forward function for ChannelMapper\n\n Args:\n inputs (Dict[str, torch.Tensor]): The backbone feature maps.\n\n Return:\n tuple(torch.Tensor): A tuple of the processed features.\n \"\"\"\n assert len(inputs) == len(self.convs)\n outs = [self.convs[i](inputs[self.in_features[i]]) for i in range(len(inputs))]\n if self.extra_convs:\n for i in range(len(self.extra_convs)):\n if i == 0:\n outs.append(self.extra_convs[0](inputs[self.in_features[-1]]))\n else:\n outs.append(self.extra_convs[i](outs[-1]))\n return tuple(outs)" }, { "identifier": "PositionEmbeddingSine", "path": "detrex/layers/position_embedding.py", "snippet": "class PositionEmbeddingSine(nn.Module):\n \"\"\"Sinusoidal position embedding used in DETR model.\n\n Please see `End-to-End Object Detection with Transformers\n <https://arxiv.org/pdf/2005.12872>`_ for more details.\n\n Args:\n num_pos_feats (int): The feature dimension for each position along\n x-axis or y-axis. The final returned dimension for each position\n is 2 times of the input value.\n temperature (int, optional): The temperature used for scaling\n the position embedding. Default: 10000.\n scale (float, optional): A scale factor that scales the position\n embedding. The scale will be used only when `normalize` is True.\n Default: 2*pi.\n eps (float, optional): A value added to the denominator for numerical\n stability. 
Default: 1e-6.\n offset (float): An offset added to embed when doing normalization.\n normalize (bool, optional): Whether to normalize the position embedding.\n Default: False.\n \"\"\"\n\n def __init__(\n self,\n num_pos_feats: int = 64,\n temperature: int = 10000,\n scale: float = 2 * math.pi,\n eps: float = 1e-6,\n offset: float = 0.0,\n normalize: bool = False,\n ):\n super().__init__()\n if normalize:\n assert isinstance(scale, (float, int)), (\n \"when normalize is set,\"\n \"scale should be provided and in float or int type, \"\n f\"found {type(scale)}\"\n )\n self.num_pos_feats = num_pos_feats\n self.temperature = temperature\n self.normalize = normalize\n self.scale = scale\n self.eps = eps\n self.offset = offset\n\n def forward(self, mask: torch.Tensor, **kwargs) -> torch.Tensor:\n \"\"\"Forward function for `PositionEmbeddingSine`.\n\n Args:\n mask (torch.Tensor): ByteTensor mask. Non-zero values representing\n ignored positions, while zero values means valid positions\n for the input tensor. Shape as `(bs, h, w)`.\n\n Returns:\n torch.Tensor: Returned position embedding with\n shape `(bs, num_pos_feats * 2, h, w)`\n \"\"\"\n assert mask is not None\n not_mask = ~mask\n y_embed = not_mask.cumsum(1, dtype=torch.float32)\n x_embed = not_mask.cumsum(2, dtype=torch.float32)\n if self.normalize:\n y_embed = (y_embed + self.offset) / (y_embed[:, -1:, :] + self.eps) * self.scale\n x_embed = (x_embed + self.offset) / (x_embed[:, :, -1:] + self.eps) * self.scale\n dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=mask.device)\n dim_t = self.temperature ** (\n 2 * torch.div(dim_t, 2, rounding_mode=\"floor\") / self.num_pos_feats\n )\n pos_x = x_embed[:, :, :, None] / dim_t\n pos_y = y_embed[:, :, :, None] / dim_t\n\n # use view as mmdet instead of flatten for dynamically exporting to ONNX\n B, H, W = mask.size()\n pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).view(\n B, H, W, -1\n )\n pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).view(\n B, H, W, -1\n )\n pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)\n return pos" }, { "identifier": "DabDeformableDetrTransformerEncoder", "path": "projects/dab_deformable_detr/modeling/dab_deformable_transformer.py", "snippet": "class DabDeformableDetrTransformerEncoder(TransformerLayerSequence):\n def __init__(\n self,\n embed_dim: int = 256,\n num_heads: int = 8,\n feedforward_dim: int = 1024,\n attn_dropout: float = 0.1,\n ffn_dropout: float = 0.1,\n operation_order: tuple = (\"self_attn\", \"norm\", \"ffn\", \"norm\"),\n num_layers: int = 6,\n post_norm: bool = False,\n num_feature_levels: int = 4,\n ):\n super(DabDeformableDetrTransformerEncoder, self).__init__(\n transformer_layers=BaseTransformerLayer(\n attn=MultiScaleDeformableAttention(\n embed_dim=embed_dim,\n num_heads=num_heads,\n dropout=attn_dropout,\n batch_first=True,\n num_levels=num_feature_levels,\n ),\n ffn=FFN(\n embed_dim=embed_dim,\n feedforward_dim=feedforward_dim,\n output_dim=embed_dim,\n num_fcs=2,\n ffn_drop=ffn_dropout,\n ),\n norm=nn.LayerNorm(embed_dim),\n operation_order=operation_order,\n ),\n num_layers=num_layers,\n )\n self.embed_dim = self.layers[0].embed_dim\n self.pre_norm = self.layers[0].pre_norm\n\n if post_norm:\n self.post_norm_layer = nn.LayerNorm(self.embed_dim)\n else:\n self.post_norm_layer = None\n\n def forward(\n self,\n query,\n key,\n value,\n query_pos=None,\n key_pos=None,\n attn_masks=None,\n query_key_padding_mask=None,\n 
key_padding_mask=None,\n **kwargs,\n ):\n\n for layer in self.layers:\n query = layer(\n query,\n key,\n value,\n query_pos=query_pos,\n attn_masks=attn_masks,\n query_key_padding_mask=query_key_padding_mask,\n key_padding_mask=key_padding_mask,\n **kwargs,\n )\n\n if self.post_norm_layer is not None:\n query = self.post_norm_layer(query)\n return query" }, { "identifier": "DabDeformableDetrTransformerDecoder", "path": "projects/dab_deformable_detr/modeling/dab_deformable_transformer.py", "snippet": "class DabDeformableDetrTransformerDecoder(TransformerLayerSequence):\n def __init__(\n self,\n embed_dim: int = 256,\n num_heads: int = 8,\n feedforward_dim: int = 1024,\n attn_dropout: float = 0.1,\n ffn_dropout: float = 0.1,\n num_layers: int = 6,\n return_intermediate: bool = True,\n num_feature_levels: int = 4,\n ):\n super(DabDeformableDetrTransformerDecoder, self).__init__(\n transformer_layers=BaseTransformerLayer(\n attn=[\n MultiheadAttention(\n embed_dim=embed_dim,\n num_heads=num_heads,\n attn_drop=attn_dropout,\n batch_first=True,\n ),\n MultiScaleDeformableAttention(\n embed_dim=embed_dim,\n num_heads=num_heads,\n dropout=attn_dropout,\n batch_first=True,\n num_levels=num_feature_levels,\n ),\n ],\n ffn=FFN(\n embed_dim=embed_dim,\n feedforward_dim=feedforward_dim,\n output_dim=embed_dim,\n ffn_drop=ffn_dropout,\n ),\n norm=nn.LayerNorm(embed_dim),\n operation_order=(\"self_attn\", \"norm\", \"cross_attn\", \"norm\", \"ffn\", \"norm\"),\n ),\n num_layers=num_layers,\n )\n self.return_intermediate = return_intermediate\n\n self.query_scale = MLP(embed_dim, embed_dim, embed_dim, 2)\n self.ref_point_head = MLP(2 * embed_dim, embed_dim, embed_dim, 2)\n\n self.bbox_embed = None\n self.class_embed = None\n\n def forward(\n self,\n query,\n key,\n value,\n query_pos=None,\n key_pos=None,\n attn_masks=None,\n query_key_padding_mask=None,\n key_padding_mask=None,\n reference_points=None, # num_queries, 4. 
normalized.\n valid_ratios=None,\n **kwargs,\n ):\n output = query\n bs, num_queries, _ = output.size()\n if reference_points.dim() == 2:\n reference_points = reference_points.unsqueeze(0).repeat(bs, 1, 1) # bs, num_queries, 4\n\n intermediate = []\n intermediate_reference_points = []\n for layer_idx, layer in enumerate(self.layers):\n if reference_points.shape[-1] == 4:\n reference_points_input = (\n reference_points[:, :, None]\n * torch.cat([valid_ratios, valid_ratios], -1)[:, None]\n )\n else:\n assert reference_points.shape[-1] == 2\n reference_points_input = reference_points[:, :, None] * valid_ratios[:, None]\n\n query_sine_embed = get_sine_pos_embed(reference_points_input[:, :, 0, :])\n raw_query_pos = self.ref_point_head(query_sine_embed)\n pos_scale = self.query_scale(output) if layer_idx != 0 else 1\n query_pos = pos_scale * raw_query_pos\n\n output = layer(\n output,\n key,\n value,\n query_pos=query_pos,\n key_pos=key_pos,\n query_sine_embed=query_sine_embed,\n attn_masks=attn_masks,\n query_key_padding_mask=query_key_padding_mask,\n key_padding_mask=key_padding_mask,\n reference_points=reference_points_input,\n **kwargs,\n )\n\n if self.bbox_embed is not None:\n tmp = self.bbox_embed[layer_idx](output)\n if reference_points.shape[-1] == 4:\n new_reference_points = tmp + inverse_sigmoid(reference_points)\n new_reference_points = new_reference_points.sigmoid()\n else:\n assert reference_points.shape[-1] == 2\n new_reference_points = tmp\n new_reference_points[..., :2] = tmp[..., :2] + inverse_sigmoid(reference_points)\n new_reference_points = new_reference_points.sigmoid()\n reference_points = new_reference_points.detach()\n\n if self.return_intermediate:\n intermediate.append(output)\n intermediate_reference_points.append(reference_points)\n\n if self.return_intermediate:\n return torch.stack(intermediate), torch.stack(intermediate_reference_points)\n\n return output, reference_points" }, { "identifier": "DabDeformableDetrTransformer", "path": "projects/dab_deformable_detr/modeling/dab_deformable_transformer.py", "snippet": "class DabDeformableDetrTransformer(nn.Module):\n \"\"\"Transformer module for DAB-Deformable-DETR\n\n Args:\n encoder (nn.Module): encoder module.\n decoder (nn.Module): decoder module.\n as_two_stage (bool): whether to use two-stage transformer. Default False.\n num_feature_levels (int): number of feature levels. Default 4.\n two_stage_num_proposals (int): number of proposals in two-stage transformer. 
Default 100.\n Only used when as_two_stage is True.\n \"\"\"\n\n def __init__(\n self,\n encoder=None,\n decoder=None,\n as_two_stage=False,\n num_feature_levels=4,\n two_stage_num_proposals=300,\n ):\n super(DabDeformableDetrTransformer, self).__init__()\n self.encoder = encoder\n self.decoder = decoder\n self.as_two_stage = as_two_stage\n self.num_feature_levels = num_feature_levels\n self.two_stage_num_proposals = two_stage_num_proposals\n\n self.embed_dim = self.encoder.embed_dim\n\n self.level_embeds = nn.Parameter(torch.Tensor(self.num_feature_levels, self.embed_dim))\n\n if self.as_two_stage:\n self.enc_output = nn.Linear(self.embed_dim, self.embed_dim)\n self.enc_output_norm = nn.LayerNorm(self.embed_dim)\n\n self.init_weights()\n\n def init_weights(self):\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n for m in self.modules():\n if isinstance(m, MultiScaleDeformableAttention):\n m.init_weights()\n nn.init.normal_(self.level_embeds)\n\n def gen_encoder_output_proposals(self, memory, memory_padding_mask, spatial_shapes):\n N, S, C = memory.shape\n proposals = []\n _cur = 0\n for lvl, (H, W) in enumerate(spatial_shapes):\n mask_flatten_ = memory_padding_mask[:, _cur : (_cur + H * W)].view(N, H, W, 1)\n valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)\n valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)\n\n grid_y, grid_x = torch.meshgrid(\n torch.linspace(0, H - 1, H, dtype=torch.float32, device=memory.device),\n torch.linspace(0, W - 1, W, dtype=torch.float32, device=memory.device),\n )\n grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)\n\n scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(N, 1, 1, 2)\n grid = (grid.unsqueeze(0).expand(N, -1, -1, -1) + 0.5) / scale\n wh = torch.ones_like(grid) * 0.05 * (2.0**lvl)\n proposal = torch.cat((grid, wh), -1).view(N, -1, 4)\n proposals.append(proposal)\n _cur += H * W\n\n output_proposals = torch.cat(proposals, 1)\n output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(\n -1, keepdim=True\n )\n output_proposals = torch.log(output_proposals / (1 - output_proposals))\n output_proposals = output_proposals.masked_fill(\n memory_padding_mask.unsqueeze(-1), float(\"inf\")\n )\n output_proposals = output_proposals.masked_fill(~output_proposals_valid, float(\"inf\"))\n\n output_memory = memory\n output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float(0))\n output_memory = output_memory.masked_fill(~output_proposals_valid, float(0))\n output_memory = self.enc_output_norm(self.enc_output(output_memory))\n return output_memory, output_proposals\n\n @staticmethod\n def get_reference_points(spatial_shapes, valid_ratios, device):\n \"\"\"Get the reference points used in decoder.\n\n Args:\n spatial_shapes (Tensor): The shape of all\n feature maps, has shape (num_level, 2).\n valid_ratios (Tensor): The ratios of valid\n points on the feature map, has shape\n (bs, num_levels, 2)\n device (obj:`device`): The device where\n reference_points should be.\n\n Returns:\n Tensor: reference points used in decoder, has \\\n shape (bs, num_keys, num_levels, 2).\n \"\"\"\n reference_points_list = []\n for lvl, (H, W) in enumerate(spatial_shapes):\n # TODO check this 0.5\n ref_y, ref_x = torch.meshgrid(\n torch.linspace(0.5, H - 0.5, H, dtype=torch.float32, device=device),\n torch.linspace(0.5, W - 0.5, W, dtype=torch.float32, device=device),\n )\n ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * H)\n ref_x = 
ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * W)\n ref = torch.stack((ref_x, ref_y), -1)\n reference_points_list.append(ref)\n reference_points = torch.cat(reference_points_list, 1)\n reference_points = reference_points[:, :, None] * valid_ratios[:, None]\n return reference_points\n\n def get_valid_ratio(self, mask):\n \"\"\"Get the valid ratios of feature maps of all levels.\"\"\"\n _, H, W = mask.shape\n valid_H = torch.sum(~mask[:, :, 0], 1)\n valid_W = torch.sum(~mask[:, 0, :], 1)\n valid_ratio_h = valid_H.float() / H\n valid_ratio_w = valid_W.float() / W\n valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1)\n return valid_ratio\n\n def forward(\n self,\n multi_level_feats,\n multi_level_masks,\n multi_level_pos_embeds,\n query_embed,\n **kwargs,\n ):\n feat_flatten = []\n mask_flatten = []\n lvl_pos_embed_flatten = []\n spatial_shapes = []\n for lvl, (feat, mask, pos_embed) in enumerate(\n zip(multi_level_feats, multi_level_masks, multi_level_pos_embeds)\n ):\n bs, c, h, w = feat.shape\n spatial_shape = (h, w)\n spatial_shapes.append(spatial_shape)\n\n feat = feat.flatten(2).transpose(1, 2) # bs, hw, c\n mask = mask.flatten(1)\n pos_embed = pos_embed.flatten(2).transpose(1, 2) # bs, hw, c\n lvl_pos_embed = pos_embed + self.level_embeds[lvl].view(1, 1, -1)\n lvl_pos_embed_flatten.append(lvl_pos_embed)\n feat_flatten.append(feat)\n mask_flatten.append(mask)\n feat_flatten = torch.cat(feat_flatten, 1)\n mask_flatten = torch.cat(mask_flatten, 1)\n lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)\n spatial_shapes = torch.as_tensor(\n spatial_shapes, dtype=torch.long, device=feat_flatten.device\n )\n level_start_index = torch.cat(\n (spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1])\n )\n valid_ratios = torch.stack([self.get_valid_ratio(m) for m in multi_level_masks], 1)\n\n reference_points = self.get_reference_points(\n spatial_shapes, valid_ratios, device=feat.device\n )\n\n memory = self.encoder(\n query=feat_flatten,\n key=None,\n value=None,\n query_pos=lvl_pos_embed_flatten,\n query_key_padding_mask=mask_flatten,\n spatial_shapes=spatial_shapes,\n reference_points=reference_points, # bs, num_token, num_level, 2\n level_start_index=level_start_index,\n valid_ratios=valid_ratios,\n **kwargs,\n )\n\n bs, _, c = memory.shape\n if self.as_two_stage:\n assert query_embed is None, \"query_embed should be None in two-stage\"\n output_memory, output_proposals = self.gen_encoder_output_proposals(\n memory, mask_flatten, spatial_shapes\n )\n # output_memory: bs, num_tokens, c\n # output_proposals: bs, num_tokens, 4. 
unsigmoided.\n # output_proposals: bs, num_tokens, 4\n\n enc_outputs_class = self.decoder.class_embed[self.decoder.num_layers](output_memory)\n enc_outputs_coord_unact = (\n self.decoder.bbox_embed[self.decoder.num_layers](output_memory) + output_proposals\n ) # unsigmoided.\n\n topk = self.two_stage_num_proposals\n topk_proposals = torch.topk(enc_outputs_class.max(-1)[0], topk, dim=1)[1]\n\n # extract region proposal boxes\n topk_coords_unact = torch.gather(\n enc_outputs_coord_unact, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4)\n ) # unsigmoided.\n reference_points = topk_coords_unact.detach().sigmoid()\n init_reference_out = reference_points\n\n # extract region features\n target_unact = torch.gather(\n output_memory, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, output_memory.shape[-1])\n )\n target = target_unact.detach()\n else:\n reference_points = query_embed[..., self.embed_dim :].sigmoid()\n target = query_embed[..., : self.embed_dim]\n target = target.unsqueeze(0).expand(bs, -1, -1)\n init_reference_out = reference_points\n # (300, 4)\n\n # decoder\n inter_states, inter_references = self.decoder(\n query=target, # bs, num_queries, embed_dims\n key=memory, # bs, num_tokens, embed_dims\n value=memory, # bs, num_tokens, embed_dims\n query_pos=None,\n key_padding_mask=mask_flatten, # bs, num_tokens\n reference_points=reference_points, # num_queries, 4\n spatial_shapes=spatial_shapes, # nlvl, 2\n level_start_index=level_start_index, # nlvl\n valid_ratios=valid_ratios, # bs, nlvl, 2\n **kwargs,\n )\n\n inter_references_out = inter_references\n if self.as_two_stage:\n return (\n inter_states,\n init_reference_out,\n inter_references_out,\n target_unact,\n topk_coords_unact.sigmoid(),\n )\n return inter_states, init_reference_out, inter_references_out, None, None" }, { "identifier": "DabDeformableDETR", "path": "projects/dab_deformable_detr/modeling/dab_deformable_detr.py", "snippet": "class DabDeformableDETR(nn.Module):\n \"\"\"Implement DAB-Deformable-DETR in `DAB-DETR: Dynamic Anchor Boxes are Better Queries for DETR\n <https://arxiv.org/abs/2201.12329>`_.\n\n Code is modified from the `official github repo\n <https://github.com/IDEA-opensource/DAB-DETR>`_.\n\n Args:\n backbone (nn.Module): backbone module\n position_embedding (nn.Module): position embedding module\n neck (nn.Module): neck module\n transformer (nn.Module): transformer module\n embed_dim (int): dimension of embedding\n num_classes (int): Number of total categories.\n num_queries (int): Number of proposal dynamic anchor boxes in Transformer\n criterion (nn.Module): Criterion for calculating the total losses.\n pixel_mean (List[float]): Pixel mean value for image normalization.\n Default: [123.675, 116.280, 103.530].\n pixel_std (List[float]): Pixel std value for image normalization.\n Default: [58.395, 57.120, 57.375].\n aux_loss (bool): Whether to calculate auxiliary loss in criterion. Default: True.\n select_box_nums_for_evaluation (int): the number of topk candidates\n slected at postprocess for evaluation. Default: 100.\n device (str): Training device. 
Default: \"cuda\".\n \"\"\"\n\n def __init__(\n self,\n backbone: nn.Module,\n position_embedding: nn.Module,\n neck: nn.Module,\n transformer: nn.Module,\n embed_dim: int,\n num_classes: int,\n num_queries: int,\n criterion: nn.Module,\n pixel_mean: List[float] = [123.675, 116.280, 103.530],\n pixel_std: List[float] = [58.395, 57.120, 57.375],\n aux_loss: bool = True,\n as_two_stage: bool = False,\n select_box_nums_for_evaluation: int = 300,\n device=\"cuda\",\n ):\n super().__init__()\n # define backbone and position embedding module\n self.backbone = backbone\n self.position_embedding = position_embedding\n\n # define neck module\n self.neck = neck\n\n # define leanable anchor boxes and learnable tgt embedings.\n # tgt embedings corresponding to content queries in original paper.\n self.num_queries = num_queries\n if not as_two_stage:\n self.tgt_embed = nn.Embedding(num_queries, embed_dim)\n self.refpoint_embed = nn.Embedding(num_queries, 4)\n # initialize learnable anchor boxes\n nn.init.zeros_(self.tgt_embed.weight)\n nn.init.uniform_(self.refpoint_embed.weight)\n self.refpoint_embed.weight.data[:] = inverse_sigmoid(\n self.refpoint_embed.weight.data[:]\n ).clamp(-3, 3)\n\n # define transformer module\n self.transformer = transformer\n\n # define classification head and box head\n self.class_embed = nn.Linear(embed_dim, num_classes)\n self.bbox_embed = MLP(embed_dim, embed_dim, 4, 3)\n self.num_classes = num_classes\n\n # where to calculate auxiliary loss in criterion\n self.aux_loss = aux_loss\n self.criterion = criterion\n\n # define contoller for two-stage variants\n self.as_two_stage = as_two_stage\n\n # init parameters for heads\n prior_prob = 0.01\n bias_value = -math.log((1 - prior_prob) / prior_prob)\n self.class_embed.bias.data = torch.ones(num_classes) * bias_value\n nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0)\n nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0)\n for _, neck_layer in self.neck.named_modules():\n if isinstance(neck_layer, nn.Conv2d):\n nn.init.xavier_uniform_(neck_layer.weight, gain=1)\n nn.init.constant_(neck_layer.bias, 0)\n\n # if two-stage, the last class_embed and bbox_embed is for region proposal generation\n num_pred = (\n (transformer.decoder.num_layers + 1) if as_two_stage else transformer.decoder.num_layers\n )\n self.class_embed = nn.ModuleList([copy.deepcopy(self.class_embed) for i in range(num_pred)])\n self.bbox_embed = nn.ModuleList([copy.deepcopy(self.bbox_embed) for i in range(num_pred)])\n nn.init.constant_(self.bbox_embed[0].layers[-1].bias.data[2:], -2.0)\n\n # hack implementation for two-stage\n if self.as_two_stage:\n self.transformer.decoder.class_embed = self.class_embed\n\n # hack implementation for iterative bounding box refinement and two-stage.\n # The last class_embed and bbox_embed is for region proposal generation\n self.transformer.decoder.bbox_embed = self.bbox_embed\n if self.as_two_stage:\n for bbox_embed_layer in self.bbox_embed:\n nn.init.constant_(bbox_embed_layer.layers[-1].bias.data[2:], 0.0)\n\n # set topk boxes selected for inference\n self.select_box_nums_for_evaluation = select_box_nums_for_evaluation\n\n # normalizer for input raw images\n self.device = device\n pixel_mean = torch.Tensor(pixel_mean).to(self.device).view(3, 1, 1)\n pixel_std = torch.Tensor(pixel_std).to(self.device).view(3, 1, 1)\n self.normalizer = lambda x: (x - pixel_mean) / pixel_std\n\n def forward(self, batched_inputs):\n \"\"\"Forward function of `DAB-Deformable-DETR` which excepts a list of dict as inputs.\n\n 
Args:\n batched_inputs (List[dict]): A list of instance dict, and each instance dict must consists of:\n - dict[\"image\"] (torch.Tensor): The unnormalized image tensor.\n - dict[\"height\"] (int): The original image height.\n - dict[\"width\"] (int): The original image width.\n - dict[\"instance\"] (detectron2.structures.Instances):\n Image meta informations and ground truth boxes and labels during training.\n Please refer to\n https://detectron2.readthedocs.io/en/latest/modules/structures.html#detectron2.structures.Instances\n for the basic usage of Instances.\n\n Returns:\n dict: Returns a dict with the following elements:\n - dict[\"pred_logits\"]: the classification logits for all queries (anchor boxes in DAB-DETR).\n with shape ``[batch_size, num_queries, num_classes]``\n - dict[\"pred_boxes\"]: The normalized boxes coordinates for all queries in format\n ``(x, y, w, h)``. These values are normalized in [0, 1] relative to the size of\n each individual image (disregarding possible padding). See PostProcess for information\n on how to retrieve the unnormalized bounding box.\n - dict[\"aux_outputs\"]: Optional, only returned when auxilary losses are activated. It is a list of\n dictionnaries containing the two above keys for each decoder layer.\n \"\"\"\n images = self.preprocess_image(batched_inputs)\n\n if self.training:\n batch_size, _, H, W = images.tensor.shape\n img_masks = images.tensor.new_ones(batch_size, H, W)\n for img_id in range(batch_size):\n # mask padding regions in batched images\n img_h, img_w = batched_inputs[img_id][\"instances\"].image_size\n img_masks[img_id, :img_h, :img_w] = 0\n else:\n batch_size, _, H, W = images.tensor.shape\n img_masks = images.tensor.new_zeros(batch_size, H, W)\n\n # original features\n features = self.backbone(images.tensor) # output feature dict\n\n # project backbone features to the reuired dimension of transformer\n # we use multi-scale features in DAB-Deformable-DETR\n multi_level_feats = self.neck(features)\n multi_level_masks = []\n multi_level_position_embeddings = []\n for feat in multi_level_feats:\n multi_level_masks.append(\n F.interpolate(img_masks[None], size=feat.shape[-2:]).to(torch.bool).squeeze(0)\n )\n multi_level_position_embeddings.append(self.position_embedding(multi_level_masks[-1]))\n\n # initialize object query embeddings\n if self.as_two_stage:\n query_embeds = None\n else:\n tgt_embed = self.tgt_embed.weight # nq, 256\n refanchor = self.refpoint_embed.weight # nq, 4\n query_embeds = torch.cat((tgt_embed, refanchor), dim=1)\n\n (\n inter_states,\n init_reference,\n inter_references,\n enc_state,\n enc_reference, # [0..1]\n ) = self.transformer(\n multi_level_feats, multi_level_masks, multi_level_position_embeddings, query_embeds\n )\n\n # Calculate output coordinates and classes.\n outputs_classes = []\n outputs_coords = []\n for lvl in range(inter_states.shape[0]):\n if lvl == 0:\n reference = init_reference\n else:\n reference = inter_references[lvl - 1]\n reference = inverse_sigmoid(reference)\n outputs_class = self.class_embed[lvl](inter_states[lvl])\n tmp = self.bbox_embed[lvl](inter_states[lvl])\n if reference.shape[-1] == 4:\n tmp += reference\n else:\n assert reference.shape[-1] == 2\n tmp[..., :2] += reference\n outputs_coord = tmp.sigmoid()\n outputs_classes.append(outputs_class)\n outputs_coords.append(outputs_coord)\n outputs_class = torch.stack(outputs_classes)\n # tensor shape: [num_decoder_layers, bs, num_query, num_classes]\n outputs_coord = torch.stack(outputs_coords)\n # tensor shape: 
[num_decoder_layers, bs, num_query, 4]\n\n # prepare for loss computation\n output = {\"pred_logits\": outputs_class[-1], \"pred_boxes\": outputs_coord[-1]}\n if self.aux_loss:\n output[\"aux_outputs\"] = self._set_aux_loss(outputs_class, outputs_coord)\n\n # prepare two stage output\n if self.as_two_stage:\n interm_coord = enc_reference\n interm_class = self.class_embed[-1](enc_state)\n output[\"enc_outputs\"] = {\"pred_logits\": interm_class, \"pred_boxes\": interm_coord}\n\n if self.training:\n gt_instances = [x[\"instances\"].to(self.device) for x in batched_inputs]\n targets = self.prepare_targets(gt_instances)\n loss_dict = self.criterion(output, targets)\n weight_dict = self.criterion.weight_dict\n for k in loss_dict.keys():\n if k in weight_dict:\n loss_dict[k] *= weight_dict[k]\n return loss_dict\n else:\n box_cls = output[\"pred_logits\"]\n box_pred = output[\"pred_boxes\"]\n results = self.inference(box_cls, box_pred, images.image_sizes)\n processed_results = []\n for results_per_image, input_per_image, image_size in zip(\n results, batched_inputs, images.image_sizes\n ):\n height = input_per_image.get(\"height\", image_size[0])\n width = input_per_image.get(\"width\", image_size[1])\n r = detector_postprocess(results_per_image, height, width)\n processed_results.append({\"instances\": r})\n return processed_results\n\n def preprocess_image(self, batched_inputs):\n images = [self.normalizer(x[\"image\"].to(self.device)) for x in batched_inputs]\n images = ImageList.from_tensors(images)\n return images\n\n def inference(self, box_cls, box_pred, image_sizes):\n \"\"\"\n Arguments:\n box_cls (Tensor): tensor of shape (batch_size, num_queries, K).\n The tensor predicts the classification probability for each query.\n box_pred (Tensor): tensors of shape (batch_size, num_queries, 4).\n The tensor predicts 4-vector (x,y,w,h) box\n regression values for every queryx\n image_sizes (List[torch.Size]): the input image sizes\n\n Returns:\n results (List[Instances]): a list of #images elements.\n \"\"\"\n assert len(box_cls) == len(image_sizes)\n results = []\n\n # Select top-k confidence boxes for inference\n prob = box_cls.sigmoid()\n topk_values, topk_indexes = torch.topk(\n prob.view(box_cls.shape[0], -1), self.select_box_nums_for_evaluation, dim=1\n )\n scores = topk_values\n topk_boxes = torch.div(topk_indexes, box_cls.shape[2], rounding_mode=\"floor\")\n labels = topk_indexes % box_cls.shape[2]\n\n boxes = torch.gather(box_pred, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))\n\n for i, (scores_per_image, labels_per_image, box_pred_per_image, image_size) in enumerate(\n zip(scores, labels, boxes, image_sizes)\n ):\n result = Instances(image_size)\n result.pred_boxes = Boxes(box_cxcywh_to_xyxy(box_pred_per_image))\n\n result.pred_boxes.scale(scale_x=image_size[1], scale_y=image_size[0])\n result.scores = scores_per_image\n result.pred_classes = labels_per_image\n results.append(result)\n return results\n\n def prepare_targets(self, targets):\n new_targets = []\n for targets_per_image in targets:\n h, w = targets_per_image.image_size\n image_size_xyxy = torch.as_tensor([w, h, w, h], dtype=torch.float, device=self.device)\n gt_classes = targets_per_image.gt_classes\n gt_boxes = targets_per_image.gt_boxes.tensor / image_size_xyxy\n gt_boxes = box_xyxy_to_cxcywh(gt_boxes)\n new_targets.append({\"labels\": gt_classes, \"boxes\": gt_boxes})\n return new_targets\n\n @torch.jit.unused\n def _set_aux_loss(self, outputs_class, outputs_coord):\n # this is a workaround to make torchscript happy, as 
torchscript\n # doesn't support dictionary with non-homogeneous values, such\n # as a dict having both a Tensor and a list.\n return [\n {\"pred_logits\": a, \"pred_boxes\": b}\n for a, b in zip(outputs_class[:-1], outputs_coord[:-1])\n ]" }, { "identifier": "TwoStageCriterion", "path": "projects/dab_deformable_detr/modeling/two_stage_criterion.py", "snippet": "class TwoStageCriterion(SetCriterion):\n def __init__(\n self,\n num_classes,\n matcher,\n weight_dict,\n losses=[\"class\", \"boxes\"],\n eos_coef=None,\n loss_class_type=\"focal_loss\",\n alpha: float = 0.25,\n gamma: float = 2,\n two_stage_binary_cls=False,\n ):\n super().__init__(\n num_classes, matcher, weight_dict, losses, eos_coef, loss_class_type, alpha, gamma\n )\n self.two_stage_binary_cls = two_stage_binary_cls\n\n def forward(self, outputs, targets, return_indices=False):\n \"\"\"This performs the loss computation.\n Parameters:\n outputs: dict of tensors, see the output specification of the model for the format\n targets: list of dicts, such that len(targets) == batch_size.\n The expected keys in each dict depends on the losses applied, see each loss' doc\n\n return_indices: used for vis. if True, the layer0-5 indices will be returned as well.\n\n \"\"\"\n\n outputs_without_aux = {k: v for k, v in outputs.items() if k != \"aux_outputs\"}\n\n # Retrieve the matching between the outputs of the last layer and the targets\n indices = self.matcher(outputs_without_aux, targets)\n\n # Compute the average number of target boxes accross all nodes, for normalization purposes\n num_boxes = sum(len(t[\"labels\"]) for t in targets)\n num_boxes = torch.as_tensor(\n [num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device\n )\n if is_dist_avail_and_initialized():\n torch.distributed.all_reduce(num_boxes)\n num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()\n\n # Compute all the requested losses\n losses = {}\n for loss in self.losses:\n losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))\n\n # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.\n if \"aux_outputs\" in outputs:\n for i, aux_outputs in enumerate(outputs[\"aux_outputs\"]):\n indices = self.matcher(aux_outputs, targets)\n for loss in self.losses:\n l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes)\n l_dict = {k + f\"_{i}\": v for k, v in l_dict.items()}\n losses.update(l_dict)\n\n # for two stage\n if \"enc_outputs\" in outputs:\n enc_outputs = outputs[\"enc_outputs\"]\n if self.two_stage_binary_cls:\n for bt in targets:\n bt[\"labels\"] = torch.zeros_like(bt[\"labels\"])\n indices = self.matcher(enc_outputs, targets)\n for loss in self.losses:\n if loss == \"masks\":\n # Intermediate masks losses are too costly to compute, we ignore them.\n continue\n l_dict = self.get_loss(loss, enc_outputs, targets, indices, num_boxes)\n l_dict = {k + \"_enc\": v for k, v in l_dict.items()}\n losses.update(l_dict)\n\n return losses" } ]
import copy import torch.nn as nn from detectron2.modeling.backbone import ResNet, BasicStem from detectron2.layers import ShapeSpec from detectron2.config import LazyCall as L from detrex.modeling.matcher import HungarianMatcher from detrex.modeling.neck import ChannelMapper from detrex.layers import PositionEmbeddingSine from projects.dab_deformable_detr.modeling import ( DabDeformableDETR, DabDeformableDetrTransformerEncoder, DabDeformableDetrTransformerDecoder, DabDeformableDetrTransformer, TwoStageCriterion, )
12,988
model = L(DabDeformableDETR)( backbone=L(ResNet)( stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"), stages=L(ResNet.make_default_stages)( depth=50, stride_in_1x1=False, norm="FrozenBN", ), out_features=["res3", "res4", "res5"], freeze_at=1, ), position_embedding=L(PositionEmbeddingSine)( num_pos_feats=128, temperature=10000, normalize=True, offset=-0.5, ), neck=L(ChannelMapper)( input_shapes={ "res3": ShapeSpec(channels=512), "res4": ShapeSpec(channels=1024), "res5": ShapeSpec(channels=2048), }, in_features=["res3", "res4", "res5"], out_channels=256, num_outs=4, kernel_size=1, norm_layer=L(nn.GroupNorm)(num_groups=32, num_channels=256), ), transformer=L(DabDeformableDetrTransformer)(
model = L(DabDeformableDETR)( backbone=L(ResNet)( stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"), stages=L(ResNet.make_default_stages)( depth=50, stride_in_1x1=False, norm="FrozenBN", ), out_features=["res3", "res4", "res5"], freeze_at=1, ), position_embedding=L(PositionEmbeddingSine)( num_pos_feats=128, temperature=10000, normalize=True, offset=-0.5, ), neck=L(ChannelMapper)( input_shapes={ "res3": ShapeSpec(channels=512), "res4": ShapeSpec(channels=1024), "res5": ShapeSpec(channels=2048), }, in_features=["res3", "res4", "res5"], out_channels=256, num_outs=4, kernel_size=1, norm_layer=L(nn.GroupNorm)(num_groups=32, num_channels=256), ), transformer=L(DabDeformableDetrTransformer)(
encoder=L(DabDeformableDetrTransformerEncoder)(
3
2023-10-12 03:02:25+00:00
16k
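The record above centers on DAB-Deformable-DETR's box handling: each decoder layer predicts an offset that is added to the inverse-sigmoid of the previous reference anchor and squashed back with a sigmoid, while inference and target preparation move boxes between normalized (cx, cy, w, h) and absolute (x0, y0, x1, y1). The following is a minimal sketch of those pieces, not the detrex source: the helper bodies and the reference/offset placeholder tensors are my own reconstruction for illustration, with names chosen to mirror the imports in the record.

import torch

def box_cxcywh_to_xyxy(b: torch.Tensor) -> torch.Tensor:
    # (cx, cy, w, h) -> (x0, y0, x1, y1), for tensors shaped [..., 4]
    cx, cy, w, h = b.unbind(-1)
    return torch.stack([cx - 0.5 * w, cy - 0.5 * h, cx + 0.5 * w, cy + 0.5 * h], dim=-1)

def box_xyxy_to_cxcywh(b: torch.Tensor) -> torch.Tensor:
    # (x0, y0, x1, y1) -> (cx, cy, w, h)
    x0, y0, x1, y1 = b.unbind(-1)
    return torch.stack([(x0 + x1) / 2, (y0 + y1) / 2, x1 - x0, y1 - y0], dim=-1)

def inverse_sigmoid(x: torch.Tensor, eps: float = 1e-5) -> torch.Tensor:
    # clamped logit, so sigmoid(inverse_sigmoid(x)) stays close to x for x in (0, 1)
    x = x.clamp(min=eps, max=1 - eps)
    return torch.log(x / (1 - x))

# Per-layer refinement as in the record's forward pass: add the bbox head's
# offset to the un-sigmoided reference anchor, then squash back into [0, 1].
reference = torch.rand(2, 300, 4)        # normalized anchors (cx, cy, w, h) -- illustrative values
offset = 0.05 * torch.randn(2, 300, 4)   # stands in for self.bbox_embed[lvl](inter_states[lvl])
outputs_coord = (offset + inverse_sigmoid(reference)).sigmoid()
boxes_xyxy = box_cxcywh_to_xyxy(outputs_coord)  # what inference() then scales to pixel coordinates

Working in sigmoid space keeps every predicted coordinate bounded in [0, 1] while still letting each decoder layer emit an unbounded additive refinement, which is the pattern the record's forward loop applies level by level before stacking the per-layer outputs.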
ByungKwanLee/Full-Segment-Anything
mask_generator.py
[ { "identifier": "Sam", "path": "modeling/sam.py", "snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: ImageEncoderViT,\n prompt_encoder: PromptEncoder,\n mask_decoder: MaskDecoder,\n pixel_mean: List[float] = [123.675, 116.28, 103.53],\n pixel_std: List[float] = [58.395, 57.12, 57.375],\n ) -> None:\n \"\"\"\n SAM predicts object masks from an image and input prompts.\n\n Arguments:\n image_encoder (ImageEncoderViT): The backbone used to encode the\n image into image embeddings that allow for efficient mask prediction.\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings\n and encoded prompts.\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\n \"\"\"\n super().__init__()\n self.image_encoder = image_encoder\n self.prompt_encoder = prompt_encoder\n self.mask_decoder = mask_decoder\n self.register_buffer(\"pixel_mean\", torch.Tensor(pixel_mean).view(-1, 1, 1), False)\n self.register_buffer(\"pixel_std\", torch.Tensor(pixel_std).view(-1, 1, 1), False)\n\n @property\n def device(self) -> Any:\n return self.pixel_mean.device\n\n @torch.no_grad()\n def forward(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool,\n ) -> List[Dict[str, torch.Tensor]]:\n \"\"\"\n Predicts masks end-to-end from provided images and prompts.\n If prompts are not known in advance, using SamPredictor is\n recommended over calling the model directly.\n\n Arguments:\n batched_input (list(dict)): A list over input images, each a\n dictionary with the following keys. A prompt key can be\n excluded if it is not present.\n 'image': The image as a torch tensor in 3xHxW format,\n already transformed for input to the model.\n 'original_size': (tuple(int, int)) The original size of\n the image before transformation, as (H, W).\n 'point_coords': (torch.Tensor) Batched point prompts for\n this image, with shape BxNx2. Already transformed to the\n input frame of the model.\n 'point_labels': (torch.Tensor) Batched labels for point prompts,\n with shape BxN.\n 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\n Already transformed to the input frame of the model.\n 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\n in the form Bx1xHxW.\n multimask_output (bool): Whether the model should predict multiple\n disambiguating masks, or return a single mask.\n\n Returns:\n (list(dict)): A list over input images, where each element is\n as dictionary with the following keys.\n 'masks': (torch.Tensor) Batched binary mask predictions,\n with shape BxCxHxW, where B is the number of input prompts,\n C is determined by multimask_output, and (H, W) is the\n original size of the image.\n 'iou_predictions': (torch.Tensor) The model's predictions\n of mask quality, in shape BxC.\n 'low_res_logits': (torch.Tensor) Low resolution logits with\n shape BxCxHxW, where H=W=256. 
Can be passed as mask input\n to subsequent iterations of prediction.\n \"\"\"\n input_images = torch.stack([self.preprocess(x[\"image\"]) for x in batched_input], dim=0)\n image_embeddings = self.image_encoder(input_images)\n\n outputs = []\n for image_record, curr_embedding in zip(batched_input, image_embeddings):\n if \"point_coords\" in image_record:\n points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get(\"boxes\", None),\n masks=image_record.get(\"mask_inputs\", None),\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=image_record[\"image\"].shape[-2:],\n original_size=image_record[\"original_size\"],\n )\n masks = masks > self.mask_threshold\n outputs.append(\n {\n \"masks\": masks,\n \"iou_predictions\": iou_predictions,\n \"low_res_logits\": low_res_masks,\n }\n )\n return outputs\n \n\n # Batch Individual Mask Generation by LBK\n @torch.no_grad()\n def individual_forward(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool,\n is_low_resol: bool = False,\n ) -> List[Dict[str, torch.Tensor]]:\n \n input_images = torch.stack([self.lbk_preprocess(x[\"image\"]) for x in batched_input], dim=0)\n image_embeddings = self.image_encoder(input_images)\n\n refined_mask_outputs = []\n for image_record, curr_embedding in zip(batched_input, image_embeddings):\n if \"point_coords\" in image_record:\n points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get(\"boxes\", None),\n masks=image_record.get(\"mask_inputs\", None),\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n\n # Progressing Intergraion.. 
by LBK\n refined_masks = self.postprocess_small_regions(low_res_masks, iou_predictions, *input_images.shape[2:], is_low_resol)\n if not is_low_resol:\n refined_masks = F.interpolate(\n refined_masks.unsqueeze(1).float(),\n input_images.shape[2:],\n mode=\"bilinear\",\n align_corners=False,\n ).squeeze(1).bool()\n refined_mask_outputs.append(refined_masks)\n \n return refined_mask_outputs\n \n # PostProcess by LBK EDIT\n def postprocess_small_regions(self, masks, iou_predictions, orig_h, orig_w, is_low_resol):\n\n\n \"\"\"\n Configuration\n \"\"\"\n # pred_iou_thresh = 0.85\n # stability_score_offset = 1.0\n # stability_score_thresh = 0.85\n # box_nms_thresh = 0.7\n\n\n pred_iou_thresh = 0.7\n stability_score_offset = 1.0\n stability_score_thresh = 0.7\n box_nms_thresh = 0.7\n\n # Interpolation\n if not is_low_resol:\n masks = F.interpolate(\n masks,\n (orig_h, orig_w),\n mode=\"bilinear\",\n align_corners=False,\n )\n else:\n orig_h, orig_w = masks.shape[2:]\n\n # Serialize predictions and store in MaskData\n data = MaskData(\n masks=masks.flatten(0, 1),\n iou_preds=iou_predictions.flatten(0, 1), \n )\n\n # Filter by predicted IoU\n if pred_iou_thresh > 0.0:\n keep_mask = data[\"iou_preds\"] > pred_iou_thresh\n data.filter(keep_mask)\n\n # Calculate stability score\n data[\"stability_score\"] = calculate_stability_score(\n data[\"masks\"], self.mask_threshold, stability_score_offset\n )\n if stability_score_thresh > 0.0:\n keep_mask = data[\"stability_score\"] >= stability_score_thresh\n data.filter(keep_mask)\n\n # Threshold masks and calculate boxes\n data[\"masks\"] = data[\"masks\"] > self.mask_threshold\n data[\"boxes\"] = batched_mask_to_box(data[\"masks\"])\n\n # Filter boxes that touch crop boundaries\n keep_mask = ~is_box_near_crop_edge(data[\"boxes\"], [0, 0, orig_w, orig_h], [0, 0, orig_w, orig_h])\n if not torch.all(keep_mask):\n data.filter(keep_mask)\n data['masks'] = uncrop_masks(data[\"masks\"], [0, 0, orig_w, orig_h], orig_h, orig_w)\n\n # Remove duplicates within this crop.\n keep_by_nms = batched_nms(\n data[\"boxes\"].float(),\n data[\"iou_preds\"],\n torch.zeros_like(data[\"boxes\"][:, 0]), # categories\n iou_threshold=box_nms_thresh,\n )\n data.filter(keep_by_nms)\n\n # making masks\n return data['masks']\n\n def postprocess_masks(\n self,\n masks: torch.Tensor,\n input_size: Tuple[int, ...],\n original_size: Tuple[int, ...],\n ) -> torch.Tensor:\n \"\"\"\n Remove padding and upscale masks to the original image size.\n\n Arguments:\n masks (torch.Tensor): Batched masks from the mask_decoder,\n in BxCxHxW format.\n input_size (tuple(int, int)): The size of the image input to the\n model, in (H, W) format. 
Used to remove padding.\n original_size (tuple(int, int)): The original size of the image\n before resizing for input to the model, in (H, W) format.\n\n Returns:\n (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\n is given by original_size.\n \"\"\"\n\n masks = F.interpolate(\n masks,\n (self.image_encoder.img_size, self.image_encoder.img_size),\n mode=\"bilinear\",\n align_corners=False,\n )\n masks = masks[..., : input_size[0], : input_size[1]]\n masks = F.interpolate(masks, original_size, mode=\"bilinear\", align_corners=False)\n return masks\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.image_encoder.img_size - h\n padw = self.image_encoder.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x\n \n # by lbk edit\n def lbk_preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n return x" }, { "identifier": "SamPredictor", "path": "predictor.py", "snippet": "class SamPredictor:\n def __init__(\n self,\n sam_model: Sam,\n ) -> None:\n \"\"\"\n Uses SAM to calculate the image embedding for an image, and then\n allow repeated, efficient mask prediction given prompts.\n\n Arguments:\n sam_model (Sam): The model to use for mask prediction.\n \"\"\"\n super().__init__()\n self.model = sam_model\n self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)\n self.reset_image()\n\n def set_image(\n self,\n image: np.ndarray,\n image_format: str = \"RGB\",\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method.\n\n Arguments:\n image (np.ndarray): The image for calculating masks. Expects an\n image in HWC uint8 format, with pixel values in [0, 255].\n image_format (str): The color format of the image, in ['RGB', 'BGR'].\n \"\"\"\n assert image_format in [\n \"RGB\",\n \"BGR\",\n ], f\"image_format must be in ['RGB', 'BGR'], is {image_format}.\"\n if image_format != self.model.image_format:\n image = image[..., ::-1]\n\n # Transform the image to the form expected by the model\n input_image = self.transform.apply_image(image)\n input_image_torch = torch.as_tensor(input_image, device=self.device)\n input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :]\n\n self.set_torch_image(input_image_torch, image.shape[:2])\n\n @torch.no_grad()\n def set_torch_image(\n self,\n transformed_image: torch.Tensor,\n original_image_size: Tuple[int, ...],\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method. 
Expects the input\n image to be already transformed to the format expected by the model.\n\n Arguments:\n transformed_image (torch.Tensor): The input image, with shape\n 1x3xHxW, which has been transformed with ResizeLongestSide.\n original_image_size (tuple(int, int)): The size of the image\n before transformation, in (H, W) format.\n \"\"\"\n assert (\n len(transformed_image.shape) == 4\n and transformed_image.shape[1] == 3\n and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size\n ), f\"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}.\"\n self.reset_image()\n\n self.original_size = original_image_size\n self.input_size = tuple(transformed_image.shape[-2:])\n input_image = self.model.preprocess(transformed_image)\n self.features = self.model.image_encoder(input_image)\n self.is_image_set = True\n\n def predict(\n self,\n point_coords: Optional[np.ndarray] = None,\n point_labels: Optional[np.ndarray] = None,\n box: Optional[np.ndarray] = None,\n mask_input: Optional[np.ndarray] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n\n Arguments:\n point_coords (np.ndarray or None): A Nx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (np.ndarray or None): A length N array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n box (np.ndarray or None): A length 4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form 1xHxW, where\n for SAM, H=W=256.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (np.ndarray): The output masks in CxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (np.ndarray): An array of length C containing the model's\n predictions for the quality of each mask.\n (np.ndarray): An array of shape CxHxW, where C is the number\n of masks and H=W=256. These low resolution logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\"An image must be set with .set_image(...) 
before mask prediction.\")\n\n # Transform input prompts\n coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None\n if point_coords is not None:\n assert (\n point_labels is not None\n ), \"point_labels must be supplied if point_coords is supplied.\"\n point_coords = self.transform.apply_coords(point_coords, self.original_size)\n coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device)\n labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device)\n coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]\n if box is not None:\n box = self.transform.apply_boxes(box, self.original_size)\n box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)\n box_torch = box_torch[None, :]\n if mask_input is not None:\n mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device)\n mask_input_torch = mask_input_torch[None, :, :, :]\n\n masks, iou_predictions, low_res_masks = self.predict_torch(\n coords_torch,\n labels_torch,\n box_torch,\n mask_input_torch,\n multimask_output,\n return_logits=return_logits,\n )\n\n masks_np = masks[0].detach().cpu().numpy()\n iou_predictions_np = iou_predictions[0].detach().cpu().numpy()\n low_res_masks_np = low_res_masks[0].detach().cpu().numpy()\n return masks_np, iou_predictions_np, low_res_masks_np\n\n @torch.no_grad()\n def predict_torch(\n self,\n point_coords: Optional[torch.Tensor],\n point_labels: Optional[torch.Tensor],\n boxes: Optional[torch.Tensor] = None,\n mask_input: Optional[torch.Tensor] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n Input prompts are batched torch tensors and are expected to already be\n transformed to the input frame using ResizeLongestSide.\n\n Arguments:\n point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (torch.Tensor or None): A BxN array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n boxes (np.ndarray or None): A Bx4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form Bx1xHxW, where\n for SAM, H=W=256. Masks returned by a previous iteration of the\n predict method do not need further transformation.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (torch.Tensor): The output masks in BxCxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (torch.Tensor): An array of shape BxC containing the model's\n predictions for the quality of each mask.\n (torch.Tensor): An array of shape BxCxHxW, where C is the number\n of masks and H=W=256. 
These low res logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\"An image must be set with .set_image(...) before mask prediction.\")\n\n if point_coords is not None:\n points = (point_coords, point_labels)\n else:\n points = None\n\n # Embed prompts\n sparse_embeddings, dense_embeddings = self.model.prompt_encoder(\n points=points,\n boxes=boxes,\n masks=mask_input,\n )\n\n # Predict masks\n low_res_masks, iou_predictions = self.model.mask_decoder(\n image_embeddings=self.features,\n image_pe=self.model.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n\n # Upscale the masks to the original image resolution\n masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size)\n\n if not return_logits:\n masks = masks > self.model.mask_threshold\n\n return masks, iou_predictions, low_res_masks\n\n def get_image_embedding(self) -> torch.Tensor:\n \"\"\"\n Returns the image embeddings for the currently set image, with\n shape 1xCxHxW, where C is the embedding dimension and (H,W) are\n the embedding spatial dimension of SAM (typically C=256, H=W=64).\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\n \"An image must be set with .set_image(...) to generate an embedding.\"\n )\n assert self.features is not None, \"Features must exist if an image has been set.\"\n return self.features\n\n @property\n def device(self) -> torch.device:\n return self.model.device\n\n def reset_image(self) -> None:\n \"\"\"Resets the currently set image.\"\"\"\n self.is_image_set = False\n self.features = None\n self.orig_h = None\n self.orig_w = None\n self.input_h = None\n self.input_w = None" }, { "identifier": "MaskData", "path": "utils/amg.py", "snippet": "class MaskData:\n \"\"\"\n A structure for storing masks and their related data in batched format.\n Implements basic filtering and concatenation.\n \"\"\"\n\n def __init__(self, **kwargs) -> None:\n for v in kwargs.values():\n assert isinstance(\n v, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats = dict(**kwargs)\n\n def __setitem__(self, key: str, item: Any) -> None:\n assert isinstance(\n item, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats[key] = item\n\n def __delitem__(self, key: str) -> None:\n del self._stats[key]\n\n def __getitem__(self, key: str) -> Any:\n return self._stats[key]\n\n def items(self) -> ItemsView[str, Any]:\n return self._stats.items()\n\n def filter(self, keep: torch.Tensor) -> None:\n for k, v in self._stats.items():\n if v is None:\n self._stats[k] = None\n elif isinstance(v, torch.Tensor):\n self._stats[k] = v[torch.as_tensor(keep, device=v.device)]\n elif isinstance(v, np.ndarray):\n self._stats[k] = v[keep.detach().cpu().numpy()]\n elif isinstance(v, list) and keep.dtype == torch.bool:\n self._stats[k] = [a for i, a in enumerate(v) if keep[i]]\n elif isinstance(v, list):\n self._stats[k] = [v[i] for i in keep]\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def cat(self, new_stats: \"MaskData\") -> None:\n for k, v in new_stats.items():\n if k not in self._stats or self._stats[k] is None:\n self._stats[k] = deepcopy(v)\n elif isinstance(v, torch.Tensor):\n self._stats[k] = torch.cat([self._stats[k], v], dim=0)\n elif isinstance(v, 
np.ndarray):\n self._stats[k] = np.concatenate([self._stats[k], v], axis=0)\n elif isinstance(v, list):\n self._stats[k] = self._stats[k] + deepcopy(v)\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def to_numpy(self) -> None:\n for k, v in self._stats.items():\n if isinstance(v, torch.Tensor):\n self._stats[k] = v.detach().cpu().numpy()" }, { "identifier": "area_from_rle", "path": "utils/amg.py", "snippet": "def area_from_rle(rle: Dict[str, Any]) -> int:\n return sum(rle[\"counts\"][1::2])" }, { "identifier": "batch_iterator", "path": "utils/amg.py", "snippet": "def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:\n assert len(args) > 0 and all(\n len(a) == len(args[0]) for a in args\n ), \"Batched iteration must have inputs of all the same size.\"\n n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)\n for b in range(n_batches):\n yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]" }, { "identifier": "batched_mask_to_box", "path": "utils/amg.py", "snippet": "def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Calculates boxes in XYXY format around masks. Return [0,0,0,0] for\n an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.\n \"\"\"\n # torch.max below raises an error on empty inputs, just skip in this case\n if torch.numel(masks) == 0:\n return torch.zeros(*masks.shape[:-2], 4, device=masks.device)\n\n # Normalize shape to CxHxW\n shape = masks.shape\n h, w = shape[-2:]\n if len(shape) > 2:\n masks = masks.flatten(0, -3)\n else:\n masks = masks.unsqueeze(0)\n\n # Get top and bottom edges\n in_height, _ = torch.max(masks, dim=-1)\n in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]\n bottom_edges, _ = torch.max(in_height_coords, dim=-1)\n in_height_coords = in_height_coords + h * (~in_height)\n top_edges, _ = torch.min(in_height_coords, dim=-1)\n\n # Get left and right edges\n in_width, _ = torch.max(masks, dim=-2)\n in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]\n right_edges, _ = torch.max(in_width_coords, dim=-1)\n in_width_coords = in_width_coords + w * (~in_width)\n left_edges, _ = torch.min(in_width_coords, dim=-1)\n\n # If the mask is empty the right edge will be to the left of the left edge.\n # Replace these boxes with [0, 0, 0, 0]\n empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)\n out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)\n out = out * (~empty_filter).unsqueeze(-1)\n\n # Return to original shape\n if len(shape) > 2:\n out = out.reshape(*shape[:-2], 4)\n else:\n out = out[0]\n\n return out" }, { "identifier": "box_xyxy_to_xywh", "path": "utils/amg.py", "snippet": "def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:\n box_xywh = deepcopy(box_xyxy)\n box_xywh[2] = box_xywh[2] - box_xywh[0]\n box_xywh[3] = box_xywh[3] - box_xywh[1]\n return box_xywh" }, { "identifier": "build_all_layer_point_grids", "path": "utils/amg.py", "snippet": "def build_all_layer_point_grids(\n n_per_side: int, n_layers: int, scale_per_layer: int\n) -> List[np.ndarray]:\n \"\"\"Generates point grids for all crop layers.\"\"\"\n points_by_layer = []\n for i in range(n_layers + 1):\n n_points = int(n_per_side / (scale_per_layer**i))\n points_by_layer.append(build_point_grid(n_points))\n return points_by_layer" }, { "identifier": "calculate_stability_score", "path": "utils/amg.py", "snippet": "def 
calculate_stability_score(\n masks: torch.Tensor, mask_threshold: float, threshold_offset: float\n) -> torch.Tensor:\n \"\"\"\n Computes the stability score for a batch of masks. The stability\n score is the IoU between the binary masks obtained by thresholding\n the predicted mask logits at high and low values.\n \"\"\"\n # One mask is always contained inside the other.\n # Save memory by preventing unnecessary cast to torch.int64\n intersections = (\n (masks > (mask_threshold + threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n unions = (\n (masks > (mask_threshold - threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n return intersections / unions" }, { "identifier": "coco_encode_rle", "path": "utils/amg.py", "snippet": "def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:\n from pycocotools import mask as mask_utils # type: ignore\n\n h, w = uncompressed_rle[\"size\"]\n rle = mask_utils.frPyObjects(uncompressed_rle, h, w)\n rle[\"counts\"] = rle[\"counts\"].decode(\"utf-8\") # Necessary to serialize with json\n return rle" }, { "identifier": "generate_crop_boxes", "path": "utils/amg.py", "snippet": "def generate_crop_boxes(\n im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float\n) -> Tuple[List[List[int]], List[int]]:\n \"\"\"\n Generates a list of crop boxes of different sizes. Each layer\n has (2**i)**2 boxes for the ith layer.\n \"\"\"\n crop_boxes, layer_idxs = [], []\n im_h, im_w = im_size\n short_side = min(im_h, im_w)\n\n # Original image\n crop_boxes.append([0, 0, im_w, im_h])\n layer_idxs.append(0)\n\n def crop_len(orig_len, n_crops, overlap):\n return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))\n\n for i_layer in range(n_layers):\n n_crops_per_side = 2 ** (i_layer + 1)\n overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))\n\n crop_w = crop_len(im_w, n_crops_per_side, overlap)\n crop_h = crop_len(im_h, n_crops_per_side, overlap)\n\n crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]\n crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]\n\n # Crops in XYWH format\n for x0, y0 in product(crop_box_x0, crop_box_y0):\n box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]\n crop_boxes.append(box)\n layer_idxs.append(i_layer + 1)\n\n return crop_boxes, layer_idxs" }, { "identifier": "is_box_near_crop_edge", "path": "utils/amg.py", "snippet": "def is_box_near_crop_edge(\n boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0\n) -> torch.Tensor:\n \"\"\"Filter masks at the edge of a crop, but not at the edge of the original image.\"\"\"\n crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)\n orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)\n boxes = uncrop_boxes_xyxy(boxes, crop_box).float()\n near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)\n near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)\n near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)\n return torch.any(near_crop_edge, dim=1)" }, { "identifier": "mask_to_rle_pytorch", "path": "utils/amg.py", "snippet": "def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:\n \"\"\"\n Encodes masks to an uncompressed RLE, in the format expected by\n pycoco tools.\n \"\"\"\n # Put in fortran order and flatten h,w\n b, h, w = tensor.shape\n tensor = tensor.permute(0, 
2, 1).flatten(1)\n\n # Compute change indices\n diff = tensor[:, 1:] ^ tensor[:, :-1]\n change_indices = diff.nonzero()\n\n # Encode run length\n out = []\n for i in range(b):\n cur_idxs = change_indices[change_indices[:, 0] == i, 1]\n cur_idxs = torch.cat(\n [\n torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),\n cur_idxs + 1,\n torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),\n ]\n )\n btw_idxs = cur_idxs[1:] - cur_idxs[:-1]\n counts = [] if tensor[i, 0] == 0 else [0]\n counts.extend(btw_idxs.detach().cpu().tolist())\n out.append({\"size\": [h, w], \"counts\": counts})\n return out" }, { "identifier": "remove_small_regions", "path": "utils/amg.py", "snippet": "def remove_small_regions(\n mask: np.ndarray, area_thresh: float, mode: str\n) -> Tuple[np.ndarray, bool]:\n \"\"\"\n Removes small disconnected regions and holes in a mask. Returns the\n mask and an indicator of if the mask has been modified.\n \"\"\"\n import cv2 # type: ignore\n\n assert mode in [\"holes\", \"islands\"]\n correct_holes = mode == \"holes\"\n working_mask = (correct_holes ^ mask).astype(np.uint8)\n n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)\n sizes = stats[:, -1][1:] # Row 0 is background label\n small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]\n if len(small_regions) == 0:\n return mask, False\n fill_labels = [0] + small_regions\n if not correct_holes:\n fill_labels = [i for i in range(n_labels) if i not in fill_labels]\n # If every region is below threshold, keep largest\n if len(fill_labels) == 0:\n fill_labels = [int(np.argmax(sizes)) + 1]\n mask = np.isin(regions, fill_labels)\n return mask, True" }, { "identifier": "rle_to_mask", "path": "utils/amg.py", "snippet": "def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:\n \"\"\"Compute a binary mask from an uncompressed RLE.\"\"\"\n h, w = rle[\"size\"]\n mask = np.empty(h * w, dtype=bool)\n idx = 0\n parity = False\n for count in rle[\"counts\"]:\n mask[idx : idx + count] = parity\n idx += count\n parity ^= True\n mask = mask.reshape(w, h)\n return mask.transpose() # Put in C order" }, { "identifier": "uncrop_boxes_xyxy", "path": "utils/amg.py", "snippet": "def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)\n # Check if boxes has a channel dimension\n if len(boxes.shape) == 3:\n offset = offset.unsqueeze(1)\n return boxes + offset" }, { "identifier": "uncrop_masks", "path": "utils/amg.py", "snippet": "def uncrop_masks(\n masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int\n) -> torch.Tensor:\n x0, y0, x1, y1 = crop_box\n if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:\n return masks\n # Coordinate transform masks\n pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)\n pad = (x0, pad_x - x0, y0, pad_y - y0)\n return torch.nn.functional.pad(masks, pad, value=0)" }, { "identifier": "uncrop_points", "path": "utils/amg.py", "snippet": "def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0]], device=points.device)\n # Check if points has a channel dimension\n if len(points.shape) == 3:\n offset = offset.unsqueeze(1)\n return points + offset" } ]
import numpy as np import torch import cv2 # type: ignore # noqa: F401 from torchvision.ops.boxes import batched_nms, box_area # type: ignore from typing import Any, Dict, List, Optional, Tuple from modeling import Sam from predictor import SamPredictor from utils.amg import ( MaskData, area_from_rle, batch_iterator, batched_mask_to_box, box_xyxy_to_xywh, build_all_layer_point_grids, calculate_stability_score, coco_encode_rle, generate_crop_boxes, is_box_near_crop_edge, mask_to_rle_pytorch, remove_small_regions, rle_to_mask, uncrop_boxes_xyxy, uncrop_masks, uncrop_points, ) from pycocotools import mask as mask_utils # type: ignore # noqa: F401
12,149
orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes( orig_size, self.crop_n_layers, self.crop_overlap_ratio ) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data["crop_boxes"]) scores = scores.to(data["boxes"].device) keep_by_nms = batched_nms( data["boxes"].float(), scores, torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) data["points"] = uncrop_points(data["points"], crop_box) data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=True, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data["iou_preds"] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score data["stability_score"] = calculate_stability_score( data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset ) if self.stability_score_thresh > 0.0: keep_mask = data["stability_score"] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data["masks"] = data["masks"] > self.predictor.model.mask_threshold data["boxes"] = batched_mask_to_box(data["masks"]) # Filter boxes that touch crop boundaries keep_mask = ~is_box_near_crop_edge(data["boxes"], crop_box, [0, 0, orig_w, orig_h]) if not torch.all(keep_mask): data.filter(keep_mask) # Compress to RLE data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. class SamMaskGenerator: def __init__( self, model: Sam, points_per_side: Optional[int] = 32, points_per_batch: int = 64, pred_iou_thresh: float = 0.88, stability_score_thresh: float = 0.95, stability_score_offset: float = 1.0, box_nms_thresh: float = 0.7, crop_n_layers: int = 0, crop_nms_thresh: float = 0.7, crop_overlap_ratio: float = 512 / 1500, crop_n_points_downscale_factor: int = 1, point_grids: Optional[List[np.ndarray]] = None, min_mask_region_area: int = 0, output_mode: str = "binary_mask", ) -> None: """ Using a SAM model, generates masks for the entire image. Generates a grid of point prompts over the image, then filters low quality and duplicate masks. The default settings are chosen for SAM with a ViT-H backbone. Arguments: model (Sam): The SAM model to use for mask prediction. points_per_side (int or None): The number of points to be sampled along one side of the image. The total number of points is points_per_side**2. If None, 'point_grids' must provide explicit point sampling. points_per_batch (int): Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU memory. pred_iou_thresh (float): A filtering threshold in [0,1], using the model's predicted mask quality. stability_score_thresh (float): A filtering threshold in [0,1], using the stability of the mask under changes to the cutoff used to binarize the model's mask predictions. stability_score_offset (float): The amount to shift the cutoff when calculated the stability score. box_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks. crop_n_layers (int): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. crop_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks between different crops. crop_overlap_ratio (float): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. crop_n_points_downscale_factor (int): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. point_grids (list(np.ndarray) or None): A list over explicit grids of points used for sampling, normalized to [0,1]. The nth grid in the list is used in the nth crop layer. Exclusive with points_per_side. min_mask_region_area (int): If >0, postprocessing will be applied to remove disconnected regions and holes in masks with area smaller than min_mask_region_area. Requires opencv. output_mode (str): The form masks are returned in. Can be 'binary_mask', 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. For large resolutions, 'binary_mask' may consume large amounts of memory. """ assert (points_per_side is None) != ( point_grids is None ), "Exactly one of points_per_side or point_grid must be provided." 
if points_per_side is not None: self.point_grids = build_all_layer_point_grids( points_per_side, crop_n_layers, crop_n_points_downscale_factor, ) elif point_grids is not None: self.point_grids = point_grids else: raise ValueError("Can't have both points_per_side and point_grid be None.") assert output_mode in [ "binary_mask", "uncompressed_rle", "coco_rle", ], f"Unknown output_mode {output_mode}." if output_mode == "coco_rle": if min_mask_region_area > 0: self.predictor = SamPredictor(model) self.points_per_batch = points_per_batch self.pred_iou_thresh = pred_iou_thresh self.stability_score_thresh = stability_score_thresh self.stability_score_offset = stability_score_offset self.box_nms_thresh = box_nms_thresh self.crop_n_layers = crop_n_layers self.crop_nms_thresh = crop_nms_thresh self.crop_overlap_ratio = crop_overlap_ratio self.crop_n_points_downscale_factor = crop_n_points_downscale_factor self.min_mask_region_area = min_mask_region_area self.output_mode = output_mode @torch.no_grad() def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: """ Generates masks for the given image. Arguments: image (np.ndarray): The image to generate masks for, in HWC uint8 format. Returns: list(dict(str, any)): A list over records for masks. Each record is a dict containing the following keys: segmentation (dict(str, any) or np.ndarray): The mask. If output_mode='binary_mask', is an array of shape HW. Otherwise, is a dictionary containing the RLE. bbox (list(float)): The box around the mask, in XYWH format. area (int): The area in pixels of the mask. predicted_iou (float): The model's own prediction of the mask's quality. This is filtered by the pred_iou_thresh parameter. point_coords (list(list(float))): The point coordinates input to the model to generate this mask. stability_score (float): A measure of the mask's quality. This is filtered on using the stability_score_thresh parameter. crop_box (list(float)): The crop of the image used to generate the mask, given in XYWH format. 
""" # Generate masks mask_data = self._generate_masks(image) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: mask_data = self.postprocess_small_regions( mask_data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == "coco_rle": mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]] elif self.output_mode == "binary_mask": mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]] else: mask_data["segmentations"] = mask_data["rles"] # Write mask records curr_anns = [] for idx in range(len(mask_data["segmentations"])): ann = { "segmentation": mask_data["segmentations"][idx], "area": area_from_rle(mask_data["rles"][idx]), "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), "predicted_iou": mask_data["iou_preds"][idx].item(), "point_coords": [mask_data["points"][idx].tolist()], "stability_score": mask_data["stability_score"][idx].item(), "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), } curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes( orig_size, self.crop_n_layers, self.crop_overlap_ratio ) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data["crop_boxes"]) scores = scores.to(data["boxes"].device) keep_by_nms = batched_nms( data["boxes"].float(), scores, torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. 
keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) data["points"] = uncrop_points(data["points"], crop_box) data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=True, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data["iou_preds"] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score data["stability_score"] = calculate_stability_score( data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset ) if self.stability_score_thresh > 0.0: keep_mask = data["stability_score"] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data["masks"] = data["masks"] > self.predictor.model.mask_threshold data["boxes"] = batched_mask_to_box(data["masks"]) # Filter boxes that touch crop boundaries keep_mask = ~is_box_near_crop_edge(data["boxes"], crop_box, [0, 0, orig_w, orig_h]) if not torch.all(keep_mask): data.filter(keep_mask) # Compress to RLE data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w)
data["rles"] = mask_to_rle_pytorch(data["masks"])
12
2023-10-13 20:07:42+00:00
16k
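The Full-Segment-Anything record stops right before its gold line, data["rles"] = mask_to_rle_pytorch(data["masks"]), and its context snippets spell out the uncompressed RLE convention used throughout: masks are flattened in column-major (Fortran) order and "counts" holds run lengths of alternating values, always starting with a background run, which is the layout pycocotools expects. Below is a minimal numpy sketch of that round trip, written for illustration rather than taken from the repo; the function names mirror the snippet's helpers but the bodies are my own.

import numpy as np

def mask_to_rle(mask: np.ndarray) -> dict:
    # Flatten column by column (Fortran order), then record run lengths of
    # alternating values; the first count always describes background (False).
    h, w = mask.shape
    flat = mask.transpose().reshape(-1).astype(bool)
    change = np.flatnonzero(flat[1:] != flat[:-1]) + 1   # indices where a new run starts
    bounds = np.concatenate(([0], change, [flat.size]))
    counts = np.diff(bounds).tolist()
    if flat[0]:
        counts = [0] + counts                            # keep the "background first" convention
    return {"size": [h, w], "counts": counts}

def rle_to_mask(rle: dict) -> np.ndarray:
    h, w = rle["size"]
    flat = np.empty(h * w, dtype=bool)
    idx, val = 0, False
    for count in rle["counts"]:
        flat[idx:idx + count] = val
        idx += count
        val = not val
    return flat.reshape(w, h).transpose()                # undo the Fortran-order flattening

# Round trip on a toy mask.
mask = np.zeros((4, 4), dtype=bool)
mask[1:3, 1:3] = True
rle = mask_to_rle(mask)
assert (rle_to_mask(rle) == mask).all()
assert sum(rle["counts"][1::2]) == mask.sum()            # area_from_rle convention

Starting every counts list with a (possibly zero-length) background run is what lets area_from_rle recover the mask area as the sum of the odd-indexed counts, as the final assert checks.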
sakemin/cog-musicgen-remixer
audiocraft/modules/conditioners.py
[ { "identifier": "ChromaExtractor", "path": "audiocraft/modules/chroma.py", "snippet": "class ChromaExtractor(nn.Module):\n \"\"\"Chroma extraction and quantization.\n\n Args:\n sample_rate (int): Sample rate for the chroma extraction.\n n_chroma (int): Number of chroma bins for the chroma extraction.\n radix2_exp (int): Size of stft window for the chroma extraction (power of 2, e.g. 12 -> 2^12).\n nfft (int, optional): Number of FFT.\n winlen (int, optional): Window length.\n winhop (int, optional): Window hop size.\n argmax (bool, optional): Whether to use argmax. Defaults to False.\n norm (float, optional): Norm for chroma normalization. Defaults to inf.\n \"\"\"\n def __init__(self, sample_rate: int, n_chroma: int = 12, radix2_exp: int = 12, nfft: tp.Optional[int] = None,\n winlen: tp.Optional[int] = None, winhop: tp.Optional[int] = None, argmax: bool = False,\n norm: float = torch.inf):\n super().__init__()\n self.winlen = winlen or 2 ** radix2_exp\n self.nfft = nfft or self.winlen\n self.winhop = winhop or (self.winlen // 4)\n self.sample_rate = sample_rate\n self.n_chroma = n_chroma\n self.norm = norm\n self.argmax = argmax\n self.register_buffer('fbanks', torch.from_numpy(filters.chroma(sr=sample_rate, n_fft=self.nfft, tuning=0,\n n_chroma=self.n_chroma)), persistent=False)\n self.spec = torchaudio.transforms.Spectrogram(n_fft=self.nfft, win_length=self.winlen,\n hop_length=self.winhop, power=2, center=True,\n pad=0, normalized=True)\n\n def forward(self, wav: torch.Tensor) -> torch.Tensor:\n T = wav.shape[-1]\n # in case we are getting a wav that was dropped out (nullified)\n # from the conditioner, make sure wav length is no less that nfft\n if T < self.nfft:\n pad = self.nfft - T\n r = 0 if pad % 2 == 0 else 1\n wav = F.pad(wav, (pad // 2, pad // 2 + r), 'constant', 0)\n assert wav.shape[-1] == self.nfft, f\"expected len {self.nfft} but got {wav.shape[-1]}\"\n\n spec = self.spec(wav).squeeze(1)\n raw_chroma = torch.einsum('cf,...ft->...ct', self.fbanks, spec)\n norm_chroma = torch.nn.functional.normalize(raw_chroma, p=self.norm, dim=-2, eps=1e-6)\n norm_chroma = rearrange(norm_chroma, 'b d t -> b t d')\n\n if self.argmax:\n idx = norm_chroma.argmax(-1, keepdim=True)\n norm_chroma[:] = 0\n norm_chroma.scatter_(dim=-1, index=idx, value=1)\n\n return norm_chroma" }, { "identifier": "ChordExtractor", "path": "audiocraft/modules/chord_chroma.py", "snippet": "class ChordExtractor(nn.Module):\n\n def __init__(self, device, sample_rate, max_duration, chroma_len, n_chroma, winhop):\n super().__init__()\n self.config = HParams.load(\"/src/audiocraft/modules/btc/run_config.yaml\") #gotta specify the path for run_config.yaml of btc\n\n # self.config.feature['large_voca'] = False\n # self.config.model['num_chords'] = 25\n\n self.model_file = '/src/audiocraft/modules/btc/test/btc_model_large_voca.pt'\n # self.model_file = 'audiocraft/modules/btc/test/btc_model.pt'\n self.idx_to_chord = idx2voca_chord()\n self.sr = sample_rate\n\n self.n_chroma = n_chroma\n self.max_duration = max_duration\n self.chroma_len = chroma_len\n self.to_timebin = self.max_duration/self.chroma_len\n self.timebin = winhop\n\n self.chords = chords.Chords()\n self.device = device\n\n self.denoise_window_size = 7\n self.denoise_threshold = 0.5\n \n self.model = BTC_model(config=self.config.model).to(device)\n if os.path.isfile(self.model_file):\n checkpoint = torch.load(self.model_file)\n self.mean = checkpoint['mean']\n self.std = checkpoint['std']\n self.model.load_state_dict(checkpoint['model'])\n\n def forward(self, 
wavs:torch.Tensor) -> torch.Tensor:\n sr = self.config.mp3['song_hz']\n chromas = []\n for wav in wavs:\n original_wav = librosa.resample(wav.cpu().numpy(), orig_sr=self.sr, target_sr=sr)\n original_wav = original_wav.squeeze(0)\n # print(original_wav.shape)\n T = original_wav.shape[-1]\n # in case we are getting a wav that was dropped out (nullified)\n # from the conditioner, make sure wav length is no less that nfft\n if T < self.timebin//4:\n pad = self.timebin//4 - T\n r = 0 if pad % 2 == 0 else 1\n original_wav = F.pad(torch.Tensor(original_wav), (pad // 2, pad // 2 + r), 'constant', 0)\n original_wav = original_wav.numpy()\n assert original_wav.shape[-1] == self.timebin//4, f\"expected len {self.timebin//4} but got {original_wav.shape[-1]}\"\n # print(original_wav.shape)\n #preprocess\n currunt_sec_hz = 0\n\n while len(original_wav) > currunt_sec_hz + self.config.mp3['song_hz'] * self.config.mp3['inst_len']:\n start_idx = int(currunt_sec_hz)\n end_idx = int(currunt_sec_hz + self.config.mp3['song_hz'] * self.config.mp3['inst_len'])\n tmp = librosa.cqt(original_wav[start_idx:end_idx], sr=sr, n_bins=self.config.feature['n_bins'], bins_per_octave=self.config.feature['bins_per_octave'], hop_length=self.config.feature['hop_length'])\n if start_idx == 0:\n feature = tmp\n else:\n feature = np.concatenate((feature, tmp), axis=1)\n currunt_sec_hz = end_idx\n \n if currunt_sec_hz == 0:\n feature = librosa.cqt(original_wav[currunt_sec_hz:], sr=sr, n_bins=self.config.feature['n_bins'], bins_per_octave=self.config.feature['bins_per_octave'], hop_length=self.config.feature['hop_length'])\n else:\n tmp = librosa.cqt(original_wav[currunt_sec_hz:], sr=sr, n_bins=self.config.feature['n_bins'], bins_per_octave=self.config.feature['bins_per_octave'], hop_length=self.config.feature['hop_length'])\n feature = np.concatenate((feature, tmp), axis=1)\n # print(feature.shape)\n feature = np.log(np.abs(feature) + 1e-6)\n # print(feature)\n feature_per_second = self.config.mp3['inst_len'] / self.config.model['timestep']\n song_length_second = len(original_wav)/self.config.mp3['song_hz']\n\n feature = feature.T\n feature = (feature - self.mean)/self.std\n\n time_unit = feature_per_second\n n_timestep = self.config.model['timestep']\n\n num_pad = n_timestep - (feature.shape[0] % n_timestep)\n feature = np.pad(feature, ((0, num_pad), (0, 0)), mode=\"constant\", constant_values=0)\n num_instance = feature.shape[0] // n_timestep\n\n #inference\n start_time = 0.0\n lines = []\n with torch.no_grad():\n self.model.eval()\n feature = torch.tensor(feature, dtype=torch.float32).unsqueeze(0).to(self.device)\n for t in range(num_instance):\n self_attn_output, _ = self.model.self_attn_layers(feature[:, n_timestep * t:n_timestep * (t + 1), :])\n prediction, _ = self.model.output_layer(self_attn_output)\n prediction = prediction.squeeze()\n for i in range(n_timestep):\n if t == 0 and i == 0:\n prev_chord = prediction[i].item()\n continue\n if prediction[i].item() != prev_chord:\n lines.append(\n '%.3f %.3f %s\\n' % (start_time, time_unit * (n_timestep * t + i), self.idx_to_chord[prev_chord]))\n start_time = time_unit * (n_timestep * t + i)\n prev_chord = prediction[i].item()\n if t == num_instance - 1 and i + num_pad == n_timestep:\n if start_time != time_unit * (n_timestep * t + i):\n lines.append('%.3f %.3f %s\\n' % (start_time, time_unit * (n_timestep * t + i), self.idx_to_chord[prev_chord]))\n break\n\n strlines = ''.join(lines)\n\n chroma = []\n\n count = 0\n for line in lines:\n if count >= self.chroma_len: \n break\n 
splits = line.split()\n if len(splits) == 3:\n s = splits[0]\n e = splits[1]\n l = splits[2]\n\n crd = self.chords.chord(l)\n \n if crd[0] == -1:\n multihot = torch.Tensor(crd[2])\n else:\n multihot = torch.concat([torch.Tensor(crd[2])[-crd[0]:],torch.Tensor(crd[2])[:-crd[0]]])\n start_bin = round(float(s)/self.to_timebin)\n end_bin = round(float(e)/self.to_timebin)\n for j in range(start_bin,end_bin):\n if count >= self.chroma_len: \n break\n chroma.append(multihot)\n count += 1\n \n chroma = torch.stack(chroma, dim=0)\n\n # Denoising chroma\n kernel = torch.ones(self.denoise_window_size)/self.denoise_window_size\n\n filtered_signals = []\n for i in range(chroma.shape[-1]):\n filtered_signals.append(torch.nn.functional.conv1d(chroma[...,i].unsqueeze(0),\n kernel.unsqueeze(0).unsqueeze(0).to(chroma.device), \n padding=(self.denoise_window_size - 1) // 2))\n filtered_signals = torch.stack(filtered_signals, dim=-1)\n filtered_signals = filtered_signals > self.denoise_threshold\n\n chromas.append(filtered_signals.squeeze(0))\n \n return torch.stack(chromas, dim=0).to(self.device)" }, { "identifier": "StreamingModule", "path": "audiocraft/modules/streaming.py", "snippet": "class StreamingModule(nn.Module):\n \"\"\"Common API for streaming components.\n\n Each streaming component has a streaming state, which is just a dict[str, Tensor].\n By convention, the first dim of each tensor must be the batch size.\n Don't use dots in the key names, as this would clash with submodules\n (like in state_dict).\n\n If `self._is_streaming` is True, the component should use and remember\n the proper state inside `self._streaming_state`.\n\n To set a streaming component in streaming state, use\n\n with module.streaming():\n ...\n\n This will automatically reset the streaming state when exiting the context manager.\n This also automatically propagates to all streaming children module.\n\n Some module might also implement the `StreamingModule.flush` method, although\n this one is trickier, as all parents module must be StreamingModule and implement\n it as well for it to work properly. See `StreamingSequential` after.\n \"\"\"\n def __init__(self) -> None:\n super().__init__()\n self._streaming_state: State = {}\n self._is_streaming = False\n\n def _apply_named_streaming(self, fn: tp.Any):\n for name, module in self.named_modules():\n if isinstance(module, StreamingModule):\n fn(name, module)\n\n def _set_streaming(self, streaming: bool):\n def _set_streaming(name, module):\n module._is_streaming = streaming\n self._apply_named_streaming(_set_streaming)\n\n @contextmanager\n def streaming(self):\n \"\"\"Context manager to enter streaming mode. 
Reset streaming state on exit.\"\"\"\n self._set_streaming(True)\n try:\n yield\n finally:\n self._set_streaming(False)\n self.reset_streaming()\n\n def reset_streaming(self):\n \"\"\"Reset the streaming state.\"\"\"\n def _reset(name: str, module: StreamingModule):\n module._streaming_state.clear()\n\n self._apply_named_streaming(_reset)\n\n def get_streaming_state(self) -> State:\n \"\"\"Return the streaming state, including that of sub-modules.\"\"\"\n state: State = {}\n\n def _add(name: str, module: StreamingModule):\n if name:\n name += \".\"\n for key, value in module._streaming_state.items():\n state[name + key] = value\n\n self._apply_named_streaming(_add)\n return state\n\n def set_streaming_state(self, state: State):\n \"\"\"Set the streaming state, including that of sub-modules.\"\"\"\n state = dict(state)\n\n def _set(name: str, module: StreamingModule):\n if name:\n name += \".\"\n module._streaming_state.clear()\n for key, value in list(state.items()):\n # complexity is not ideal here, but probably fine.\n if key.startswith(name):\n local_key = key[len(name):]\n if '.' not in local_key:\n module._streaming_state[local_key] = value\n del state[key]\n\n self._apply_named_streaming(_set)\n assert len(state) == 0, list(state.keys())\n\n def flush(self, x: tp.Optional[torch.Tensor] = None):\n \"\"\"Flush any remaining outputs that were waiting for completion.\n Typically, for convolutions, this will add the final padding\n and process the last buffer.\n\n This should take an optional argument `x`, which will be provided\n if a module before this one in the streaming pipeline has already\n spitted out a flushed out buffer.\n \"\"\"\n if x is None:\n return None\n else:\n return self(x)" }, { "identifier": "create_sin_embedding", "path": "audiocraft/modules/transformer.py", "snippet": "def create_sin_embedding(positions: torch.Tensor, dim: int, max_period: float = 10000,\n dtype: torch.dtype = torch.float32) -> torch.Tensor:\n \"\"\"Create sinusoidal positional embedding, with shape `[B, T, C]`.\n\n Args:\n positions (torch.Tensor): LongTensor of positions.\n dim (int): Dimension of the embedding.\n max_period (float): Maximum period of the cosine/sine functions.\n dtype (torch.dtype or str): dtype to use to generate the embedding.\n Returns:\n torch.Tensor: Sinusoidal positional embedding.\n \"\"\"\n # We aim for BTC format\n assert dim % 2 == 0\n half_dim = dim // 2\n positions = positions.to(dtype)\n adim = torch.arange(half_dim, device=positions.device, dtype=dtype).view(1, 1, -1)\n max_period_tensor = torch.full([], max_period, device=positions.device, dtype=dtype) # avoid sync point\n phase = positions / (max_period_tensor ** (adim / (half_dim - 1)))\n return torch.cat([torch.cos(phase), torch.sin(phase)], dim=-1)" }, { "identifier": "audio_read", "path": "audiocraft/data/audio.py", "snippet": "def audio_read(filepath: tp.Union[str, Path], seek_time: float = 0.,\n duration: float = -1., pad: bool = False) -> tp.Tuple[torch.Tensor, int]:\n \"\"\"Read audio by picking the most appropriate backend tool based on the audio format.\n\n Args:\n filepath (str or Path): Path to audio file to read.\n seek_time (float): Time at which to start reading in the file.\n duration (float): Duration to read from the file. 
If set to -1, the whole file is read.\n pad (bool): Pad output audio if not reaching expected duration.\n Returns:\n tuple of torch.Tensor, int: Tuple containing audio data and sample rate.\n \"\"\"\n fp = Path(filepath)\n if fp.suffix in ['.flac', '.ogg']: # TODO: check if we can safely use av_read for .ogg\n # There is some bug with ffmpeg and reading flac\n info = _soundfile_info(filepath)\n frames = -1 if duration <= 0 else int(duration * info.sample_rate)\n frame_offset = int(seek_time * info.sample_rate)\n wav, sr = soundfile.read(filepath, start=frame_offset, frames=frames, dtype=np.float32)\n assert info.sample_rate == sr, f\"Mismatch of sample rates {info.sample_rate} {sr}\"\n wav = torch.from_numpy(wav).t().contiguous()\n if len(wav.shape) == 1:\n wav = torch.unsqueeze(wav, 0)\n else:\n wav, sr = _av_read(filepath, seek_time, duration)\n if pad and duration > 0:\n expected_frames = int(duration * sr)\n wav = F.pad(wav, (0, expected_frames - wav.shape[-1]))\n return wav, sr" }, { "identifier": "SegmentInfo", "path": "audiocraft/data/audio_dataset.py", "snippet": "class SegmentInfo(BaseInfo):\n meta: AudioMeta\n seek_time: float\n # The following values are given once the audio is processed, e.g.\n # at the target sample rate and target number of channels.\n n_frames: int # actual number of frames without padding\n total_frames: int # total number of frames, padding included\n sample_rate: int # actual sample rate\n channels: int # number of audio channels." }, { "identifier": "convert_audio", "path": "audiocraft/data/audio_utils.py", "snippet": "def convert_audio(wav: torch.Tensor, from_rate: float,\n to_rate: float, to_channels: int) -> torch.Tensor:\n \"\"\"Convert audio to new sample rate and number of audio channels.\"\"\"\n wav = julius.resample_frac(wav, int(from_rate), int(to_rate))\n wav = convert_audio_channels(wav, to_channels)\n return wav" }, { "identifier": "AudioCraftEnvironment", "path": "audiocraft/environment.py", "snippet": "class AudioCraftEnvironment:\n \"\"\"Environment configuration for teams and clusters.\n\n AudioCraftEnvironment picks compute cluster settings (slurm, dora) from the current running environment\n or declared variable and the loaded team configuration. Additionally, the AudioCraftEnvironment\n provides pointers to a reference folder resolved automatically across clusters that is shared across team members,\n allowing to share sigs or other files to run jobs. Finally, it provides dataset mappers to automatically\n map dataset file paths to new locations across clusters, allowing to use the same manifest of files across cluters.\n\n The cluster type is identified automatically and base configuration file is read from config/teams.yaml.\n Use the following environment variables to specify the cluster, team or configuration:\n\n AUDIOCRAFT_CLUSTER (optional): Cluster type to enforce. Useful if the cluster type\n cannot be inferred automatically.\n AUDIOCRAFT_CONFIG (optional): Path to yaml config holding the teams configuration.\n If not set, configuration is read from config/teams.yaml.\n AUDIOCRAFT_TEAM (optional): Name of the team. 
Recommended to set to your own team.\n Cluster configuration are shared across teams to match compute allocation,\n specify your cluster configuration in the configuration file under a key mapping\n your team name.\n \"\"\"\n _instance = None\n DEFAULT_TEAM = \"default\"\n\n def __init__(self) -> None:\n \"\"\"Loads configuration.\"\"\"\n self.team: str = os.getenv(\"AUDIOCRAFT_TEAM\", self.DEFAULT_TEAM)\n cluster_type = _guess_cluster_type()\n cluster = os.getenv(\n \"AUDIOCRAFT_CLUSTER\", cluster_type.value\n )\n logger.info(\"Detecting cluster type %s\", cluster_type)\n\n self.cluster: str = cluster\n\n config_path = os.getenv(\n \"AUDIOCRAFT_CONFIG\",\n Path(__file__)\n .parent.parent.joinpath(\"config/teams\", self.team)\n .with_suffix(\".yaml\"),\n )\n self.config = omegaconf.OmegaConf.load(config_path)\n self._dataset_mappers = []\n cluster_config = self._get_cluster_config()\n if \"dataset_mappers\" in cluster_config:\n for pattern, repl in cluster_config[\"dataset_mappers\"].items():\n regex = re.compile(pattern)\n self._dataset_mappers.append((regex, repl))\n\n def _get_cluster_config(self) -> omegaconf.DictConfig:\n assert isinstance(self.config, omegaconf.DictConfig)\n return self.config[self.cluster]\n\n @classmethod\n def instance(cls):\n if cls._instance is None:\n cls._instance = cls()\n return cls._instance\n\n @classmethod\n def reset(cls):\n \"\"\"Clears the environment and forces a reload on next invocation.\"\"\"\n cls._instance = None\n\n @classmethod\n def get_team(cls) -> str:\n \"\"\"Gets the selected team as dictated by the AUDIOCRAFT_TEAM env var.\n If not defined, defaults to \"labs\".\n \"\"\"\n return cls.instance().team\n\n @classmethod\n def get_cluster(cls) -> str:\n \"\"\"Gets the detected cluster.\n This value can be overridden by the AUDIOCRAFT_CLUSTER env var.\n \"\"\"\n return cls.instance().cluster\n\n @classmethod\n def get_dora_dir(cls) -> Path:\n \"\"\"Gets the path to the dora directory for the current team and cluster.\n Value is overridden by the AUDIOCRAFT_DORA_DIR env var.\n \"\"\"\n cluster_config = cls.instance()._get_cluster_config()\n dora_dir = os.getenv(\"AUDIOCRAFT_DORA_DIR\", cluster_config[\"dora_dir\"])\n logger.warning(f\"Dora directory: {dora_dir}\")\n return Path(dora_dir)\n\n @classmethod\n def get_reference_dir(cls) -> Path:\n \"\"\"Gets the path to the reference directory for the current team and cluster.\n Value is overridden by the AUDIOCRAFT_REFERENCE_DIR env var.\n \"\"\"\n cluster_config = cls.instance()._get_cluster_config()\n return Path(os.getenv(\"AUDIOCRAFT_REFERENCE_DIR\", cluster_config[\"reference_dir\"]))\n\n @classmethod\n def get_slurm_exclude(cls) -> tp.Optional[str]:\n \"\"\"Get the list of nodes to exclude for that cluster.\"\"\"\n cluster_config = cls.instance()._get_cluster_config()\n return cluster_config.get(\"slurm_exclude\")\n\n @classmethod\n def get_slurm_partitions(cls, partition_types: tp.Optional[tp.List[str]] = None) -> str:\n \"\"\"Gets the requested partitions for the current team and cluster as a comma-separated string.\n\n Args:\n partition_types (list[str], optional): partition types to retrieve. Values must be\n from ['global', 'team']. 
If not provided, the global partition is returned.\n \"\"\"\n if not partition_types:\n partition_types = [\"global\"]\n\n cluster_config = cls.instance()._get_cluster_config()\n partitions = [\n cluster_config[\"partitions\"][partition_type]\n for partition_type in partition_types\n ]\n return \",\".join(partitions)\n\n @classmethod\n def resolve_reference_path(cls, path: tp.Union[str, Path]) -> Path:\n \"\"\"Converts reference placeholder in path with configured reference dir to resolve paths.\n\n Args:\n path (str or Path): Path to resolve.\n Returns:\n Path: Resolved path.\n \"\"\"\n path = str(path)\n\n if path.startswith(\"//reference\"):\n reference_dir = cls.get_reference_dir()\n logger.warn(f\"Reference directory: {reference_dir}\")\n assert (\n reference_dir.exists() and reference_dir.is_dir()\n ), f\"Reference directory does not exist: {reference_dir}.\"\n path = re.sub(\"^//reference\", str(reference_dir), path)\n\n return Path(path)\n\n @classmethod\n def apply_dataset_mappers(cls, path: str) -> str:\n \"\"\"Applies dataset mapping regex rules as defined in the configuration.\n If no rules are defined, the path is returned as-is.\n \"\"\"\n instance = cls.instance()\n\n for pattern, repl in instance._dataset_mappers:\n path = pattern.sub(repl, path)\n\n return path" }, { "identifier": "ResidualVectorQuantizer", "path": "audiocraft/quantization/vq.py", "snippet": "class ResidualVectorQuantizer(BaseQuantizer):\n \"\"\"Residual Vector Quantizer.\n\n Args:\n dimension (int): Dimension of the codebooks.\n n_q (int): Number of residual vector quantizers used.\n q_dropout (bool): Random quantizer drop out at train time.\n bins (int): Codebook size.\n decay (float): Decay for exponential moving average over the codebooks.\n kmeans_init (bool): Whether to use kmeans to initialize the codebooks.\n kmeans_iters (int): Number of iterations used for kmeans initialization.\n threshold_ema_dead_code (int): Threshold for dead code expiration. 
Replace any codes\n that have an exponential moving average cluster size less than the specified threshold with\n randomly selected vector from the current batch.\n orthogonal_reg_weight (float): Orthogonal regularization weights.\n orthogonal_reg_active_codes_only (bool): Apply orthogonal regularization only on active codes.\n orthogonal_reg_max_codes (optional int): Maximum number of codes to consider.\n for orthogonal regularization.\n \"\"\"\n def __init__(\n self,\n dimension: int = 256,\n n_q: int = 8,\n q_dropout: bool = False,\n bins: int = 1024,\n decay: float = 0.99,\n kmeans_init: bool = True,\n kmeans_iters: int = 10,\n threshold_ema_dead_code: int = 2,\n orthogonal_reg_weight: float = 0.0,\n orthogonal_reg_active_codes_only: bool = False,\n orthogonal_reg_max_codes: tp.Optional[int] = None,\n ):\n super().__init__()\n self.max_n_q = n_q\n self.n_q = n_q\n self.q_dropout = q_dropout\n self.dimension = dimension\n self.bins = bins\n self.decay = decay\n self.kmeans_init = kmeans_init\n self.kmeans_iters = kmeans_iters\n self.threshold_ema_dead_code = threshold_ema_dead_code\n self.orthogonal_reg_weight = orthogonal_reg_weight\n self.orthogonal_reg_active_codes_only = orthogonal_reg_active_codes_only\n self.orthogonal_reg_max_codes = orthogonal_reg_max_codes\n self.vq = ResidualVectorQuantization(\n dim=self.dimension,\n codebook_size=self.bins,\n num_quantizers=self.n_q,\n decay=self.decay,\n kmeans_init=self.kmeans_init,\n kmeans_iters=self.kmeans_iters,\n threshold_ema_dead_code=self.threshold_ema_dead_code,\n orthogonal_reg_weight=self.orthogonal_reg_weight,\n orthogonal_reg_active_codes_only=self.orthogonal_reg_active_codes_only,\n orthogonal_reg_max_codes=self.orthogonal_reg_max_codes,\n channels_last=False\n )\n\n def forward(self, x: torch.Tensor, frame_rate: int):\n n_q = self.n_q\n if self.training and self.q_dropout:\n n_q = int(torch.randint(1, self.n_q + 1, (1,)).item())\n bw_per_q = math.log2(self.bins) * frame_rate / 1000\n quantized, codes, commit_loss = self.vq(x, n_q=n_q)\n codes = codes.transpose(0, 1)\n # codes is [B, K, T], with T frames, K nb of codebooks.\n bw = torch.tensor(n_q * bw_per_q).to(x)\n return QuantizedResult(quantized, codes, bw, penalty=torch.mean(commit_loss))\n\n def encode(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Encode a given input tensor with the specified frame rate at the given bandwidth.\n The RVQ encode method sets the appropriate number of quantizer to use\n and returns indices for each quantizer.\n \"\"\"\n n_q = self.n_q\n codes = self.vq.encode(x, n_q=n_q)\n codes = codes.transpose(0, 1)\n # codes is [B, K, T], with T frames, K nb of codebooks.\n return codes\n\n def decode(self, codes: torch.Tensor) -> torch.Tensor:\n \"\"\"Decode the given codes to the quantized representation.\"\"\"\n # codes is [B, K, T], with T frames, K nb of codebooks, vq.decode expects [K, B, T].\n codes = codes.transpose(0, 1)\n quantized = self.vq.decode(codes)\n return quantized\n\n @property\n def total_codebooks(self):\n return self.max_n_q\n\n @property\n def num_codebooks(self):\n return self.n_q\n\n def set_num_codebooks(self, n: int):\n assert n > 0 and n <= self.max_n_q\n self.n_q = n" }, { "identifier": "TorchAutocast", "path": "audiocraft/utils/autocast.py", "snippet": "class TorchAutocast:\n \"\"\"TorchAutocast utility class.\n Allows you to enable and disable autocast. 
This is specially useful\n when dealing with different architectures and clusters with different\n levels of support.\n\n Args:\n enabled (bool): Whether to enable torch.autocast or not.\n args: Additional args for torch.autocast.\n kwargs: Additional kwargs for torch.autocast\n \"\"\"\n def __init__(self, enabled: bool, *args, **kwargs):\n self.autocast = torch.autocast(*args, **kwargs) if enabled else None\n\n def __enter__(self):\n if self.autocast is None:\n return\n try:\n self.autocast.__enter__()\n except RuntimeError:\n device = self.autocast.device\n dtype = self.autocast.fast_dtype\n raise RuntimeError(\n f\"There was an error autocasting with dtype={dtype} device={device}\\n\"\n \"If you are on the FAIR Cluster, you might need to use autocast_dtype=float16\"\n )\n\n def __exit__(self, *args, **kwargs):\n if self.autocast is None:\n return\n self.autocast.__exit__(*args, **kwargs)" }, { "identifier": "EmbeddingCache", "path": "audiocraft/utils/cache.py", "snippet": "class EmbeddingCache:\n \"\"\"Cache around embeddings computation for faster execution.\n The EmbeddingCache is storing pre-computed embeddings on disk and provides a simple API\n to retrieve the pre-computed embeddings on full inputs and extract only a given chunk\n using a user-provided function. When the cache is warm (all embeddings are pre-computed),\n the EmbeddingCache allows for faster training as it removes the need of computing the embeddings.\n Additionally, it provides in-memory cache around the loaded embeddings to limit IO footprint\n and synchronization points in the forward calls.\n\n Args:\n cache_path (Path): Path to folder where all pre-computed embeddings are saved on disk.\n device (str or torch.device): Device on which the embedding is returned.\n compute_embed_fn (callable[[Path, any, int], torch.Tensor], optional): Function to compute\n the embedding from a given object and path. This user provided function can compute the\n embedding from the provided object or using the provided path as entry point. The last parameter\n specify the index corresponding to the current embedding in the object that can represent batch metadata.\n extract_embed_fn (callable[[torch.Tensor, any, int], torch.Tensor], optional): Function to extract\n the desired embedding chunk from the full embedding loaded from the cache. 
The last parameter\n specify the index corresponding to the current embedding in the object that can represent batch metadata.\n If not specified, will return the full embedding unmodified.\n \"\"\"\n def __init__(self, cache_path: tp.Union[str, Path], device: tp.Union[str, torch.device],\n compute_embed_fn: tp.Callable[[Path, tp.Any, int], torch.Tensor],\n extract_embed_fn: tp.Optional[tp.Callable[[torch.Tensor, tp.Any, int], torch.Tensor]] = None):\n self.cache_path = Path(cache_path)\n self.device = device\n self._compute_embed_fn = compute_embed_fn\n self._extract_embed_fn: tp.Callable[[torch.Tensor, tp.Any, int], torch.Tensor]\n if extract_embed_fn is not None:\n self._extract_embed_fn = extract_embed_fn\n else:\n self._extract_embed_fn = partial(get_full_embed, device=device)\n if self.cache_path is not None:\n self.cache_path.mkdir(exist_ok=True, parents=True)\n logger.info(f\"Cache instantiated at: {self.cache_path}\")\n self.pool = ThreadPoolExecutor(8)\n self.pool.__enter__()\n self._current_batch_cache: dict = {}\n self._memory_cache: dict = {}\n\n def _get_cache_path(self, path: tp.Union[Path, str]):\n \"\"\"Get cache path for the given file path.\"\"\"\n sig = sha1(str(path).encode()).hexdigest()\n return self.cache_path / sig\n\n @staticmethod\n def _get_full_embed_from_cache(cache: Path):\n \"\"\"Loads full pre-computed embedding from the cache.\"\"\"\n try:\n embed = torch.load(cache, 'cpu')\n except Exception as exc:\n logger.error(\"Error loading %s: %r\", cache, exc)\n embed = None\n return embed\n\n def get_embed_from_cache(self, paths: tp.List[Path], x: tp.Any) -> torch.Tensor:\n \"\"\"Get embedding from cache, computing and storing it to cache if not already cached.\n The EmbeddingCache first tries to load the embedding from the in-memory cache\n containing the pre-computed chunks populated through `populate_embed_cache`.\n If not found, the full embedding is computed and stored on disk to be later accessed\n to populate the in-memory cache, and the desired embedding chunk is extracted and returned.\n\n Args:\n paths (list[Path or str]): List of paths from where the embeddings can be loaded.\n x (any): Object from which the embedding is extracted.\n \"\"\"\n embeds = []\n for idx, path in enumerate(paths):\n cache = self._get_cache_path(path)\n if cache in self._current_batch_cache:\n embed = self._current_batch_cache[cache]\n else:\n full_embed = self._compute_embed_fn(path, x, idx)\n try:\n with flashy.utils.write_and_rename(cache, pid=True) as f:\n torch.save(full_embed.cpu(), f)\n except Exception as exc:\n logger.error('Error saving embed %s (%s): %r', cache, full_embed.shape, exc)\n else:\n logger.info('New embed cache saved: %s (%s)', cache, full_embed.shape)\n embed = self._extract_embed_fn(full_embed, x, idx)\n embeds.append(embed)\n embed = torch.stack(embeds, dim=0)\n return embed\n\n def populate_embed_cache(self, paths: tp.List[Path], x: tp.Any) -> None:\n \"\"\"Populate in-memory caches for embeddings reading from the embeddings stored on disk.\n The in-memory caches consist in a cache for the full embedding and another cache for the\n final embedding chunk. 
Such caches are used to limit the IO access when computing the actual embeddings\n and reduce the IO footprint and synchronization points during forward passes.\n\n Args:\n paths (list[Path]): List of paths from where the embeddings can be loaded.\n x (any): Object from which the embedding is extracted.\n \"\"\"\n self._current_batch_cache.clear()\n if self.cache_path is not None:\n futures: list = []\n for path in paths:\n assert path is not None, \"Path is required for computation from cache\"\n cache = self._get_cache_path(path)\n if cache in self._memory_cache or not cache.exists():\n futures.append(None)\n else:\n futures.append(self.pool.submit(EmbeddingCache._get_full_embed_from_cache, cache))\n for idx, (path, future) in enumerate(zip(paths, futures)):\n assert path is not None\n cache = self._get_cache_path(path)\n full_embed = None\n if future is None:\n if cache in self._memory_cache:\n full_embed = self._memory_cache[cache]\n else:\n full_embed = future.result()\n if full_embed is not None:\n self._memory_cache[cache] = full_embed\n full_embed = full_embed.to(self.device)\n if full_embed is not None:\n embed = self._extract_embed_fn(full_embed, x, idx)\n self._current_batch_cache[cache] = embed" }, { "identifier": "collate", "path": "audiocraft/utils/utils.py", "snippet": "def collate(tensors: tp.List[torch.Tensor], dim: int = 0) -> tp.Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Get a list of tensors and collate them to a single tensor. according to the following logic:\n - `dim` specifies the time dimension which will be stacked and padded.\n - The output will contain 1 new dimension (dimension index 0) which will be the size of\n of the original list.\n\n Args:\n tensors (tp.List[torch.Tensor]): List of tensors to collate.\n dim (int): Dimension which will be stacked and padded.\n Returns:\n tp.Tuple[torch.Tensor, torch.Tensor]:\n torch.Tensor: Stacked and padded tensor. The output will contain 1 new dimension\n (dimension index 0) which will be the size of the original list.\n torch.Tensor: Tensor containing length of original tensor sizes (without padding).\n \"\"\"\n tensors = [x.transpose(0, dim) for x in tensors]\n lens = torch.LongTensor([len(x) for x in tensors])\n padded_tensors = pad_sequence(tensors)\n padded_tensors = padded_tensors.transpose(0, 1)\n padded_tensors = padded_tensors.transpose(1, dim + 1)\n return padded_tensors, lens" }, { "identifier": "hash_trick", "path": "audiocraft/utils/utils.py", "snippet": "def hash_trick(word: str, vocab_size: int) -> int:\n \"\"\"Hash trick to pair each word with an index\n\n Args:\n word (str): word we wish to convert to an index\n vocab_size (int): size of the vocabulary\n Returns:\n int: index of the word in the embedding LUT\n \"\"\"\n hash = int(hashlib.sha256(word.encode(\"utf-8\")).hexdigest(), 16)\n return hash % vocab_size" }, { "identifier": "length_to_mask", "path": "audiocraft/utils/utils.py", "snippet": "def length_to_mask(lengths: torch.Tensor, max_len: tp.Optional[int] = None) -> torch.Tensor:\n \"\"\"Utility function to convert a tensor of sequence lengths to a mask (useful when working on padded sequences).\n For example: [3, 5] => [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]]\n\n Args:\n lengths (torch.Tensor): tensor with lengths\n max_len (int): can set the max length manually. 
Defaults to None.\n Returns:\n torch.Tensor: mask with 0s where there is pad tokens else 1s\n \"\"\"\n assert len(lengths.shape) == 1, \"Length shape should be 1 dimensional.\"\n final_length = lengths.max().item() if not max_len else max_len\n final_length = max(final_length, 1) # if all seqs are of len zero we don't want a zero-size tensor\n return torch.arange(final_length, device=lengths.device)[None, :] < lengths[:, None]" }, { "identifier": "load_clap_state_dict", "path": "audiocraft/utils/utils.py", "snippet": "def load_clap_state_dict(clap_model, path: tp.Union[str, Path]):\n \"\"\"Wrapper around state dict loading of CLAP model\n addressing compatibility issues between CLAP and AudioCraft\n HuggingFace transformer version.\n See: https://github.com/LAION-AI/CLAP/issues/118\n \"\"\"\n from clap_module.factory import load_state_dict # type: ignore\n pkg = load_state_dict(path)\n pkg.pop('text_branch.embeddings.position_ids', None)\n clap_model.model.load_state_dict(pkg)" }, { "identifier": "warn_once", "path": "audiocraft/utils/utils.py", "snippet": "@lru_cache(None)\ndef warn_once(logger, msg):\n \"\"\"Warn about a given message only once.\"\"\"\n logger.warning(msg)" }, { "identifier": "chords", "path": "audiocraft/modules/btc/utils/chords.py", "snippet": "def chords(self, labels):\n\n \"\"\"\n Transform a list of chord labels into an array of internal numeric\n representations.\n\n Parameters\n ----------\n labels : list\n List of chord labels (str).\n\n Returns\n -------\n chords : numpy.array\n Structured array with columns 'root', 'bass', and 'intervals',\n containing a numeric representation of chords.\n\n \"\"\"\n crds = np.zeros(len(labels), dtype=CHORD_DTYPE)\n cache = {}\n for i, lbl in enumerate(labels):\n cv = cache.get(lbl, None)\n if cv is None:\n cv = self.chord(lbl)\n cache[lbl] = cv\n crds[i] = cv\n\n return crds" } ]
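The context list above quotes several small utilities from audiocraft/utils/utils.py; the sketch below simply exercises two of them (length_to_mask and hash_trick) and restates the behaviour their docstrings describe. It assumes the quoted module is importable as audiocraft.utils.utils.

import torch
from audiocraft.utils.utils import hash_trick, length_to_mask

# Padded-sequence mask: lengths [3, 5] -> [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]] (boolean)
lengths = torch.tensor([3, 5])
mask = length_to_mask(lengths)               # shape [2, 5], True where a real token exists

# Hash trick: deterministic word -> index mapping into a fixed-size vocabulary
idx = hash_trick("guitar", vocab_size=128)   # integer in [0, 128)

These are the helpers the conditioners quoted further down rely on to mask padded condition tokens and to bucket free-form strings into embedding-table indices.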
from collections import defaultdict
from copy import deepcopy
from dataclasses import dataclass, field
from itertools import chain
from pathlib import Path
from num2words import num2words
from transformers import RobertaTokenizer, T5EncoderModel, T5Tokenizer # type: ignore
from torch import nn
from torch.nn.utils.rnn import pad_sequence
from .chroma import ChromaExtractor
from .chord_chroma import ChordExtractor
from .streaming import StreamingModule
from .transformer import create_sin_embedding
from ..data.audio import audio_read
from ..data.audio_dataset import SegmentInfo
from ..data.audio_utils import convert_audio
from ..environment import AudioCraftEnvironment
from ..quantization import ResidualVectorQuantizer
from ..utils.autocast import TorchAutocast
from ..utils.cache import EmbeddingCache
from ..utils.utils import collate, hash_trick, length_to_mask, load_clap_state_dict, warn_once
from .btc.utils import chords
from demucs import pretrained
from audiocraft.data.audio_dataset import AudioDataset
from demucs.apply import apply_model
from demucs.audio import convert_audio
import logging
import math
import random
import re
import typing as tp
import warnings
import einops
import spacy
import torch
import torch.nn.functional as F
import numpy as np
import laion_clap # type: ignore
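Among the imports above, create_sin_embedding (quoted in full in the context list) is the sinusoidal positional-embedding helper from audiocraft/modules/transformer.py. A minimal sketch of its expected shapes, assuming positions are supplied as a [B, T, 1] tensor as in the quoted implementation:

import torch
from audiocraft.modules.transformer import create_sin_embedding

B, T, dim = 2, 50, 64                                          # dim must be even
positions = torch.arange(T).view(1, -1, 1).expand(B, -1, -1)   # [B, T, 1]
pos_emb = create_sin_embedding(positions, dim)                 # [B, T, dim], float32 by default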
13,735
embeds = self.output_proj(embeds) if self.match_len_on_eval: if lengths is not None: for i in range(len(lengths)): if lengths[i] > 0 and lengths[i] < self.duration * self.sample_rate: lengths[i] = torch.Tensor([(self.duration+1) * self.sample_rate]) lengths = lengths / self._downsampling_factor() mask = length_to_mask(lengths, max_len=embeds.shape[1]).int() # type: ignore else: mask = torch.ones_like(embeds) else: if lengths is not None: lengths = lengths / self._downsampling_factor() mask = length_to_mask(lengths, max_len=embeds.shape[1]).int() # type: ignore else: mask = torch.ones_like(embeds) embeds = (embeds.to(self.device) * mask.unsqueeze(2).to(self.device)) return embeds.to(self.device), mask.to(self.device) class JointEmbeddingConditioner(BaseConditioner): """Joint embedding conditioning supporting both audio or text conditioning. Args: dim (int): Dimension. output_dim (int): Output dimension. device (str): Device. attribute (str): Attribute used by the conditioner. autocast_dtype (str): Autocast for the conditioner. quantize (bool): Whether to quantize the CLAP embedding. n_q (int): Number of residual quantizers (used if quantize is true). bins (int): Quantizers' codebooks size (used if quantize is true). kwargs: Additional parameters for residual vector quantizer. """ def __init__(self, dim: int, output_dim: int, device: str, attribute: str, autocast_dtype: tp.Optional[str] = 'float32', quantize: bool = True, n_q: int = 12, bins: int = 1024, **kwargs): super().__init__(dim=dim, output_dim=output_dim) self.device = device self.attribute = attribute if autocast_dtype is None or device == 'cpu': self.autocast = TorchAutocast(enabled=False) logger.warning("JointEmbeddingConditioner has no autocast, this might lead to NaN.") else: dtype = getattr(torch, autocast_dtype) assert isinstance(dtype, torch.dtype) logger.info(f"JointEmbeddingConditioner will be evaluated with autocast as {autocast_dtype}.") self.autocast = TorchAutocast(enabled=True, device_type=self.device, dtype=dtype) # residual vector quantizer to discretize the conditioned embedding self.quantizer: tp.Optional[ResidualVectorQuantizer] = None if quantize: self.quantizer = ResidualVectorQuantizer(dim, n_q=n_q, bins=bins, **kwargs) def _get_embed(self, x: JointEmbedCondition) -> tp.Tuple[torch.Tensor, torch.Tensor]: """Get joint embedding in latent space from the inputs. Returns: tuple[torch.Tensor, torch.Tensor]: Tensor for the latent embedding and corresponding empty indexes. """ raise NotImplementedError() def forward(self, x: JointEmbedCondition) -> ConditionType: with self.autocast: embed, empty_idx = self._get_embed(x) if self.quantizer is not None: embed = embed.view(-1, self.dim, 1) q_res = self.quantizer(embed, frame_rate=1) out_embed = q_res.x.view(-1, self.dim) else: out_embed = embed out_embed = self.output_proj(out_embed).view(-1, 1, self.output_dim) mask = torch.ones(*out_embed.shape[:2], device=out_embed.device) mask[empty_idx, :] = 0 # zero-out index where the input is non-existant out_embed = (out_embed * mask.unsqueeze(-1)) return out_embed, mask def tokenize(self, x: JointEmbedCondition) -> JointEmbedCondition: return x class CLAPEmbeddingConditioner(JointEmbeddingConditioner): """Joint Embedding conditioner based on pre-trained CLAP model. This CLAP-based conditioner supports a caching mechanism over the computed embeddings for faster training. Args: dim (int): Dimension. output_dim (int): Output dimension. device (str): Device. attribute (str): Attribute used by the conditioner. 
quantize (bool): Whether to quantize the CLAP embedding. n_q (int): Number of residual quantizers (used if quantize is true). bins (int): Quantizers' codebooks size (used if quantize is true). checkpoint (str): Path to CLAP checkpoint. model_arch (str): CLAP model architecture. enable_fusion (bool): Enable fusion for CLAP model. sample_rate (int): Sample rate used by CLAP model. max_audio_length (float): Maximum audio length for CLAP model. audio_stride (float): Stride to use for getting a CLAP embedding on the full sequence. normalize (bool): Whether to normalize the CLAP embedding. text_p (float): Probability of using text representation instead of audio at train time. batch_size (Optional[int]): Batch size for CLAP embedding computation. autocast_dtype (str): Autocast for the conditioner. cache_path (Optional[str]): Path for pre-computed embeddings caching. kwargs: Additional parameters for residual vector quantizer. """ def __init__(self, dim: int, output_dim: int, device: str, attribute: str, quantize: bool, n_q: int, bins: int, checkpoint: tp.Union[str, Path], model_arch: str, enable_fusion: bool, sample_rate: int, max_audio_length: int, audio_stride: int, normalize: bool, text_p: bool, batch_size: tp.Optional[int] = None, autocast_dtype: tp.Optional[str] = 'float32', cache_path: tp.Optional[str] = None, **kwargs): try: except ImportError: raise ImportError("Please install CLAP to use the CLAPEmbeddingConditioner: 'pip install laion_clap'") warnings.warn("Sample rate for CLAP conditioner was fixed in version v1.1.0, (from 44.1 to 48 kHz). " "Please retrain all models.")
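The conditioners in the cropped code above configure their mixed-precision execution through the TorchAutocast wrapper quoted in the context list. A minimal usage sketch, assuming the wrapper is importable as audiocraft.utils.autocast.TorchAutocast; when enabled is False (e.g. on CPU) it degrades to a no-op context, mirroring how the conditioners set it up:

import torch
from audiocraft.utils.autocast import TorchAutocast

device = "cuda" if torch.cuda.is_available() else "cpu"
autocast = TorchAutocast(enabled=(device != "cpu"), device_type=device, dtype=torch.float16)

proj = torch.nn.Linear(512, 256).to(device)
x = torch.randn(4, 512, device=device)
with autocast:   # mixed precision on GPU, plain float32 (no-op context) on CPU
    y = proj(x)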
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. logger = logging.getLogger(__name__) TextCondition = tp.Optional[str] # a text condition can be a string or None (if doesn't exist) ConditionType = tp.Tuple[torch.Tensor, torch.Tensor] # condition, mask class WavCondition(tp.NamedTuple): wav: torch.Tensor length: torch.Tensor sample_rate: tp.List[int] path: tp.List[tp.Optional[str]] = [] seek_time: tp.List[tp.Optional[float]] = [] class WavChordTextCondition(tp.NamedTuple): wav: tp.Union[torch.Tensor,str,tp.List[str]] length: torch.Tensor sample_rate: tp.List[int] path: tp.List[tp.Optional[str]] = [] seek_time: tp.List[tp.Optional[float]] = [] bpm : tp.List[tp.Optional[tp.Union[int, float]]] = [] meter : tp.List[tp.Optional[int]] = [] class JointEmbedCondition(tp.NamedTuple): wav: torch.Tensor text: tp.List[tp.Optional[str]] length: torch.Tensor sample_rate: tp.List[int] path: tp.List[tp.Optional[str]] = [] seek_time: tp.List[tp.Optional[float]] = [] @dataclass class ConditioningAttributes: text: tp.Dict[str, tp.Optional[str]] = field(default_factory=dict) wav: tp.Dict[str, tp.Union[WavCondition,WavChordTextCondition]] = field(default_factory=dict) joint_embed: tp.Dict[str, JointEmbedCondition] = field(default_factory=dict) def __getitem__(self, item): return getattr(self, item) @property def text_attributes(self): return self.text.keys() @property def wav_attributes(self): return self.wav.keys() @property def joint_embed_attributes(self): return self.joint_embed.keys() @property def attributes(self): return { "text": self.text_attributes, "wav": self.wav_attributes, "joint_embed": self.joint_embed_attributes, } def to_flat_dict(self): return { **{f"text.{k}": v for k, v in self.text.items()}, **{f"wav.{k}": v for k, v in self.wav.items()}, **{f"joint_embed.{k}": v for k, v in self.joint_embed.items()} } @classmethod def from_flat_dict(cls, x): out = cls() for k, v in x.items(): kind, att = k.split(".") out[kind][att] = v return out class SegmentWithAttributes(SegmentInfo): """Base class for all dataclasses that are used for conditioning. All child classes should implement `to_condition_attributes` that converts the existing attributes to a dataclass of type ConditioningAttributes. """ def to_condition_attributes(self) -> ConditioningAttributes: raise NotImplementedError() def nullify_condition(condition: ConditionType, dim: int = 1): """Transform an input condition to a null condition. The way it is done by converting it to a single zero vector similarly to how it is done inside WhiteSpaceTokenizer and NoopTokenizer. Args: condition (ConditionType): A tuple of condition and mask (tuple[torch.Tensor, torch.Tensor]) dim (int): The dimension that will be truncated (should be the time dimension) WARNING!: dim should not be the batch dimension! Returns: ConditionType: A tuple of null condition and mask """ assert dim != 0, "dim cannot be the batch dimension!" assert isinstance(condition, tuple) and \ isinstance(condition[0], torch.Tensor) and \ isinstance(condition[1], torch.Tensor), "'nullify_condition' got an unexpected input type!" cond, mask = condition B = cond.shape[0] last_dim = cond.dim() - 1 out = cond.transpose(dim, last_dim) out = 0. 
* out[..., :1] out = out.transpose(dim, last_dim) mask = torch.zeros((B, 1), device=out.device).int() assert cond.dim() == out.dim() return out, mask def nullify_wav(cond: tp.Union[WavCondition,WavChordTextCondition]) -> tp.Union[WavCondition,WavChordTextCondition]: """Transform a WavCondition to a nullified WavCondition. It replaces the wav by a null tensor, forces its length to 0, and replaces metadata by dummy attributes. Args: cond (WavCondition): Wav condition with wav, tensor of shape [B, T]. Returns: WavCondition: Nullified wav condition. """ if not isinstance(cond, WavChordTextCondition): null_wav, _ = nullify_condition((cond.wav, torch.zeros_like(cond.wav)), dim=cond.wav.dim() - 1) return WavCondition( wav=null_wav, length=torch.tensor([0] * cond.wav.shape[0], device=cond.wav.device), sample_rate=cond.sample_rate, path=[None] * cond.wav.shape[0], seek_time=[None] * cond.wav.shape[0], ) else: return WavChordTextCondition( wav=['N']* len(cond.wav), length=torch.tensor([0] * len(cond.wav), device=cond.length.device), sample_rate=cond.sample_rate, path=[None], seek_time=[None], bpm = cond.bpm, meter = cond.meter ) def nullify_joint_embed(embed: JointEmbedCondition) -> JointEmbedCondition: """Nullify the joint embedding condition by replacing it by a null tensor, forcing its length to 0, and replacing metadata by dummy attributes. Args: cond (JointEmbedCondition): Joint embedding condition with wav and text, wav tensor of shape [B, C, T]. """ null_wav, _ = nullify_condition((embed.wav, torch.zeros_like(embed.wav)), dim=embed.wav.dim() - 1) return JointEmbedCondition( wav=null_wav, text=[None] * len(embed.text), length=torch.LongTensor([0]).to(embed.wav.device), sample_rate=embed.sample_rate, path=[None] * embed.wav.shape[0], seek_time=[0] * embed.wav.shape[0], ) class Tokenizer: """Base tokenizer implementation (in case we want to introduce more advances tokenizers in the future). """ def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: raise NotImplementedError() class WhiteSpaceTokenizer(Tokenizer): """This tokenizer should be used for natural language descriptions. For example: ["he didn't, know he's going home.", 'shorter sentence'] => [[78, 62, 31, 4, 78, 25, 19, 34], [59, 77, 0, 0, 0, 0, 0, 0]] """ PUNCTUATION = "?:!.,;" def __init__(self, n_bins: int, pad_idx: int = 0, language: str = "en_core_web_sm", lemma: bool = True, stopwords: bool = True) -> None: self.n_bins = n_bins self.pad_idx = pad_idx self.lemma = lemma self.stopwords = stopwords try: self.nlp = spacy.load(language) except IOError: spacy.cli.download(language) # type: ignore self.nlp = spacy.load(language) @tp.no_type_check def __call__(self, texts: tp.List[tp.Optional[str]], return_text: bool = False) -> tp.Tuple[torch.Tensor, torch.Tensor]: """Take a list of strings and convert them to a tensor of indices. Args: texts (list[str]): List of strings. return_text (bool, optional): Whether to return text as additional tuple item. Defaults to False. Returns: tuple[torch.Tensor, torch.Tensor]: - Indices of words in the LUT. 
- And a mask indicating where the padding tokens are """ output, lengths = [], [] texts = deepcopy(texts) for i, text in enumerate(texts): # if current sample doesn't have a certain attribute, replace with pad token if text is None: output.append(torch.Tensor([self.pad_idx])) lengths.append(0) continue # convert numbers to words text = re.sub(r"(\d+)", lambda x: num2words(int(x.group(0))), text) # type: ignore # normalize text text = self.nlp(text) # type: ignore # remove stopwords if self.stopwords: text = [w for w in text if not w.is_stop] # type: ignore # remove punctuation text = [w for w in text if w.text not in self.PUNCTUATION] # type: ignore # lemmatize if needed text = [getattr(t, "lemma_" if self.lemma else "text") for t in text] # type: ignore texts[i] = " ".join(text) lengths.append(len(text)) # convert to tensor tokens = torch.Tensor([hash_trick(w, self.n_bins) for w in text]) output.append(tokens) mask = length_to_mask(torch.IntTensor(lengths)).int() padded_output = pad_sequence(output, padding_value=self.pad_idx).int().t() if return_text: return padded_output, mask, texts # type: ignore return padded_output, mask class NoopTokenizer(Tokenizer): """This tokenizer should be used for global conditioners such as: artist, genre, key, etc. The difference between this and WhiteSpaceTokenizer is that NoopTokenizer does not split strings, so "Jeff Buckley" will get it's own index. Whereas WhiteSpaceTokenizer will split it to ["Jeff", "Buckley"] and return an index per word. For example: ["Queen", "ABBA", "Jeff Buckley"] => [43, 55, 101] ["Metal", "Rock", "Classical"] => [0, 223, 51] """ def __init__(self, n_bins: int, pad_idx: int = 0): self.n_bins = n_bins self.pad_idx = pad_idx def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: output, lengths = [], [] for text in texts: # if current sample doesn't have a certain attribute, replace with pad token if text is None: output.append(self.pad_idx) lengths.append(0) else: output.append(hash_trick(text, self.n_bins)) lengths.append(1) tokens = torch.LongTensor(output).unsqueeze(1) mask = length_to_mask(torch.IntTensor(lengths)).int() return tokens, mask class BaseConditioner(nn.Module): """Base model for all conditioner modules. We allow the output dim to be different than the hidden dim for two reasons: 1) keep our LUTs small when the vocab is large; 2) make all condition dims consistent. Args: dim (int): Hidden dim of the model. output_dim (int): Output dim of the conditioner. """ def __init__(self, dim: int, output_dim: int): super().__init__() self.dim = dim self.output_dim = output_dim self.output_proj = nn.Linear(dim, output_dim) def tokenize(self, *args, **kwargs) -> tp.Any: """Should be any part of the processing that will lead to a synchronization point, e.g. BPE tokenization with transfer to the GPU. The returned value will be saved and return later when calling forward(). """ raise NotImplementedError() def forward(self, inputs: tp.Any) -> ConditionType: """Gets input that should be used as conditioning (e.g, genre, description or a waveform). Outputs a ConditionType, after the input data was embedded as a dense vector. Returns: ConditionType: - A tensor of size [B, T, D] where B is the batch size, T is the length of the output embedding and D is the dimension of the embedding. - And a mask indicating where the padding tokens. """ raise NotImplementedError() class TextConditioner(BaseConditioner): ... class LUTConditioner(TextConditioner): """Lookup table TextConditioner. 
Args: n_bins (int): Number of bins. dim (int): Hidden dim of the model (text-encoder/LUT). output_dim (int): Output dim of the conditioner. tokenizer (str): Name of the tokenizer. pad_idx (int, optional): Index for padding token. Defaults to 0. """ def __init__(self, n_bins: int, dim: int, output_dim: int, tokenizer: str, pad_idx: int = 0): super().__init__(dim, output_dim) self.embed = nn.Embedding(n_bins, dim) self.tokenizer: Tokenizer if tokenizer == 'whitespace': self.tokenizer = WhiteSpaceTokenizer(n_bins, pad_idx=pad_idx) elif tokenizer == 'noop': self.tokenizer = NoopTokenizer(n_bins, pad_idx=pad_idx) else: raise ValueError(f"unrecognized tokenizer `{tokenizer}`.") def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: device = self.embed.weight.device tokens, mask = self.tokenizer(x) tokens, mask = tokens.to(device), mask.to(device) return tokens, mask def forward(self, inputs: tp.Tuple[torch.Tensor, torch.Tensor]) -> ConditionType: tokens, mask = inputs embeds = self.embed(tokens) embeds = self.output_proj(embeds) embeds = (embeds * mask.unsqueeze(-1)) return embeds, mask class T5Conditioner(TextConditioner): """T5-based TextConditioner. Args: name (str): Name of the T5 model. output_dim (int): Output dim of the conditioner. finetune (bool): Whether to fine-tune T5 at train time. device (str): Device for T5 Conditioner. autocast_dtype (tp.Optional[str], optional): Autocast dtype. word_dropout (float, optional): Word dropout probability. normalize_text (bool, optional): Whether to apply text normalization. """ MODELS = ["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b", "google/flan-t5-small", "google/flan-t5-base", "google/flan-t5-large", "google/flan-t5-xl", "google/flan-t5-xxl"] MODELS_DIMS = { "t5-small": 512, "t5-base": 768, "t5-large": 1024, "t5-3b": 1024, "t5-11b": 1024, "google/flan-t5-small": 512, "google/flan-t5-base": 768, "google/flan-t5-large": 1024, "google/flan-t5-3b": 1024, "google/flan-t5-11b": 1024, } def __init__(self, name: str, output_dim: int, finetune: bool, device: str, autocast_dtype: tp.Optional[str] = 'float32', word_dropout: float = 0., normalize_text: bool = False): assert name in self.MODELS, f"Unrecognized t5 model name (should in {self.MODELS})" super().__init__(self.MODELS_DIMS[name], output_dim) self.device = device self.name = name self.finetune = finetune self.word_dropout = word_dropout if autocast_dtype is None or self.device == 'cpu': self.autocast = TorchAutocast(enabled=False) if self.device != 'cpu': logger.warning("T5 has no autocast, this might lead to NaN") else: dtype = getattr(torch, autocast_dtype) assert isinstance(dtype, torch.dtype) logger.info(f"T5 will be evaluated with autocast as {autocast_dtype}") self.autocast = TorchAutocast(enabled=True, device_type=self.device, dtype=dtype) # Let's disable logging temporarily because T5 will vomit some errors otherwise. 
# thanks https://gist.github.com/simon-weber/7853144 previous_level = logging.root.manager.disable logging.disable(logging.ERROR) with warnings.catch_warnings(): warnings.simplefilter("ignore") try: self.t5_tokenizer = T5Tokenizer.from_pretrained(name) t5 = T5EncoderModel.from_pretrained(name).train(mode=finetune) finally: logging.disable(previous_level) if finetune: self.t5 = t5 else: # this makes sure that the t5 models is not part # of the saved checkpoint self.__dict__['t5'] = t5.to(device) self.normalize_text = normalize_text if normalize_text: self.text_normalizer = WhiteSpaceTokenizer(1, lemma=True, stopwords=True) def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Dict[str, torch.Tensor]: # if current sample doesn't have a certain attribute, replace with empty string entries: tp.List[str] = [xi if xi is not None else "" for xi in x] if self.normalize_text: _, _, entries = self.text_normalizer(entries, return_text=True) if self.word_dropout > 0. and self.training: new_entries = [] for entry in entries: words = [word for word in entry.split(" ") if random.random() >= self.word_dropout] new_entries.append(" ".join(words)) entries = new_entries empty_idx = torch.LongTensor([i for i, xi in enumerate(entries) if xi == ""]) inputs = self.t5_tokenizer(entries, return_tensors='pt', padding=True).to(self.device) mask = inputs['attention_mask'] mask[empty_idx, :] = 0 # zero-out index where the input is non-existant return inputs def forward(self, inputs: tp.Dict[str, torch.Tensor]) -> ConditionType: mask = inputs['attention_mask'] with torch.set_grad_enabled(self.finetune), self.autocast: embeds = self.t5(**inputs).last_hidden_state embeds = self.output_proj(embeds.to(self.output_proj.weight)) embeds = (embeds * mask.unsqueeze(-1)) return embeds, mask class WaveformConditioner(BaseConditioner): """Base class for all conditioners that take a waveform as input. Classes that inherit must implement `_get_wav_embedding` that outputs a continuous tensor, and `_downsampling_factor` that returns the down-sampling factor of the embedding model. Args: dim (int): The internal representation dimension. output_dim (int): Output dimension. device (tp.Union[torch.device, str]): Device. """ def __init__(self, dim: int, output_dim: int, device: tp.Union[torch.device, str]): super().__init__(dim, output_dim) self.device = device # if False no masking is done, used in ChromaStemConditioner when completing by periodicity a sample. self._use_masking = True def tokenize(self, x: WavCondition) -> WavCondition: wav, length, sample_rate, path, seek_time = x assert length is not None return WavCondition(wav.to(self.device), length.to(self.device), sample_rate, path, seek_time) def _get_wav_embedding(self, x: WavCondition) -> torch.Tensor: """Gets as input a WavCondition and returns a dense embedding.""" raise NotImplementedError() def _downsampling_factor(self): """Returns the downsampling factor of the embedding model.""" raise NotImplementedError() def forward(self, x: WavCondition) -> ConditionType: """Extract condition embedding and mask from a waveform and its metadata. Args: x (WavCondition): Waveform condition containing raw waveform and metadata. 
Returns: ConditionType: a dense vector representing the conditioning along with its mask """ wav, lengths, *_ = x with torch.no_grad(): embeds = self._get_wav_embedding(x) embeds = embeds.to(self.output_proj.weight) embeds = self.output_proj(embeds) if lengths is not None and self._use_masking: lengths = lengths / self._downsampling_factor() mask = length_to_mask(lengths, max_len=embeds.shape[1]).int() # type: ignore else: mask = torch.ones_like(embeds[..., 0]) embeds = (embeds * mask.unsqueeze(-1)) return embeds, mask class ChromaStemConditioner(WaveformConditioner): """Chroma conditioner based on stems. The ChromaStemConditioner uses DEMUCS to first filter out drums and bass, as the drums and bass often dominate the chroma leading to the chroma features not containing information about the melody. Args: output_dim (int): Output dimension for the conditioner. sample_rate (int): Sample rate for the chroma extractor. n_chroma (int): Number of chroma bins for the chroma extractor. radix2_exp (int): Size of stft window for the chroma extractor (power of 2, e.g. 12 -> 2^12). duration (int): duration used during training. This is later used for correct padding in case we are using chroma as prefix. match_len_on_eval (bool, optional): if True then all chromas are padded to the training duration. Defaults to False. eval_wavs (str, optional): path to a dataset manifest with waveform, this waveforms are used as conditions during eval (for cases where we don't want to leak test conditions like MusicCaps). Defaults to None. n_eval_wavs (int, optional): limits the number of waveforms used for conditioning. Defaults to 0. device (tp.Union[torch.device, str], optional): Device for the conditioner. **kwargs: Additional parameters for the chroma extractor. """ def __init__(self, output_dim: int, sample_rate: int, n_chroma: int, radix2_exp: int, duration: float, match_len_on_eval: bool = True, eval_wavs: tp.Optional[str] = None, n_eval_wavs: int = 0, cache_path: tp.Optional[tp.Union[str, Path]] = None, device: tp.Union[torch.device, str] = 'cpu', **kwargs): super().__init__(dim=n_chroma, output_dim=output_dim, device=device) self.autocast = TorchAutocast(enabled=device != 'cpu', device_type=self.device, dtype=torch.float32) self.sample_rate = sample_rate self.match_len_on_eval = match_len_on_eval if match_len_on_eval: self._use_masking = False self.duration = duration self.__dict__['demucs'] = pretrained.get_model('htdemucs').to(device) stem_sources: list = self.demucs.sources # type: ignore self.stem_indices = torch.LongTensor([stem_sources.index('vocals'), stem_sources.index('other')]).to(device) self.chroma = ChromaExtractor(sample_rate=sample_rate, n_chroma=n_chroma, radix2_exp=radix2_exp, **kwargs).to(device) self.chroma_len = self._get_chroma_len() self.eval_wavs: tp.Optional[torch.Tensor] = self._load_eval_wavs(eval_wavs, n_eval_wavs) self.cache = None if cache_path is not None: self.cache = EmbeddingCache(Path(cache_path) / 'wav', self.device, compute_embed_fn=self._get_full_chroma_for_cache, extract_embed_fn=self._extract_chroma_chunk) def _downsampling_factor(self) -> int: return self.chroma.winhop def _load_eval_wavs(self, path: tp.Optional[str], num_samples: int) -> tp.Optional[torch.Tensor]: """Load pre-defined waveforms from a json. These waveforms will be used for chroma extraction during evaluation. This is done to make the evaluation on MusicCaps fair (we shouldn't see the chromas of MusicCaps). 
""" if path is None: return None logger.info(f"Loading evaluation wavs from {path}") dataset: AudioDataset = AudioDataset.from_meta( path, segment_duration=self.duration, min_audio_duration=self.duration, sample_rate=self.sample_rate, channels=1) if len(dataset) > 0: eval_wavs = dataset.collater([dataset[i] for i in range(num_samples)]).to(self.device) logger.info(f"Using {len(eval_wavs)} evaluation wavs for chroma-stem conditioner") return eval_wavs else: raise ValueError("Could not find evaluation wavs, check lengths of wavs") def reset_eval_wavs(self, eval_wavs: tp.Optional[torch.Tensor]) -> None: self.eval_wavs = eval_wavs def has_eval_wavs(self) -> bool: return self.eval_wavs is not None def _sample_eval_wavs(self, num_samples: int) -> torch.Tensor: """Sample wavs from a predefined list.""" assert self.eval_wavs is not None, "Cannot sample eval wavs as no eval wavs provided." total_eval_wavs = len(self.eval_wavs) out = self.eval_wavs if num_samples > total_eval_wavs: out = self.eval_wavs.repeat(num_samples // total_eval_wavs + 1, 1, 1) return out[torch.randperm(len(out))][:num_samples] def _get_chroma_len(self) -> int: """Get length of chroma during training.""" dummy_wav = torch.zeros((1, int(self.sample_rate * self.duration)), device=self.device) dummy_chr = self.chroma(dummy_wav) return dummy_chr.shape[1] @torch.no_grad() def _get_stemmed_wav(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: """Get parts of the wav that holds the melody, extracting the main stems from the wav.""" with self.autocast: wav = convert_audio( wav, sample_rate, self.demucs.samplerate, self.demucs.audio_channels) # type: ignore stems = apply_model(self.demucs, wav, device=self.device) stems = stems[:, self.stem_indices] # extract relevant stems for melody conditioning mix_wav = stems.sum(1) # merge extracted stems to single waveform mix_wav = convert_audio(mix_wav, self.demucs.samplerate, self.sample_rate, 1) # type: ignore return mix_wav @torch.no_grad() def _extract_chroma(self, wav: torch.Tensor) -> torch.Tensor: """Extract chroma features from the waveform.""" with self.autocast: return self.chroma(wav) @torch.no_grad() def _compute_wav_embedding(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: """Compute wav embedding, applying stem and chroma extraction.""" # avoid 0-size tensors when we are working with null conds if wav.shape[-1] == 1: return self._extract_chroma(wav) stems = self._get_stemmed_wav(wav, sample_rate) chroma = self._extract_chroma(stems) return chroma @torch.no_grad() def _get_full_chroma_for_cache(self, path: tp.Union[str, Path], x: WavCondition, idx: int) -> torch.Tensor: """Extract chroma from the whole audio waveform at the given path.""" wav, sr = audio_read(path) wav = wav[None].to(self.device) wav = convert_audio(wav, sr, self.sample_rate, to_channels=1) chroma = self._compute_wav_embedding(wav, self.sample_rate)[0] return chroma def _extract_chroma_chunk(self, full_chroma: torch.Tensor, x: WavCondition, idx: int) -> torch.Tensor: """Extract a chunk of chroma from the full chroma derived from the full waveform.""" wav_length = x.wav.shape[-1] seek_time = x.seek_time[idx] assert seek_time is not None, ( "WavCondition seek_time is required " "when extracting chroma chunks from pre-computed chroma.") full_chroma = full_chroma.float() frame_rate = self.sample_rate / self._downsampling_factor() target_length = int(frame_rate * wav_length / self.sample_rate) index = int(frame_rate * seek_time) out = full_chroma[index: index + target_length] out = F.pad(out[None], 
(0, 0, 0, target_length - out.shape[0]))[0] return out.to(self.device) @torch.no_grad() def _get_wav_embedding(self, x: WavCondition) -> torch.Tensor: """Get the wav embedding from the WavCondition. The conditioner will either extract the embedding on-the-fly computing it from the condition wav directly or will rely on the embedding cache to load the pre-computed embedding if relevant. """ sampled_wav: tp.Optional[torch.Tensor] = None if not self.training and self.eval_wavs is not None: warn_once(logger, "Using precomputed evaluation wavs!") sampled_wav = self._sample_eval_wavs(len(x.wav)) no_undefined_paths = all(p is not None for p in x.path) no_nullified_cond = x.wav.shape[-1] > 1 if sampled_wav is not None: chroma = self._compute_wav_embedding(sampled_wav, self.sample_rate) elif self.cache is not None and no_undefined_paths and no_nullified_cond: paths = [Path(p) for p in x.path if p is not None] chroma = self.cache.get_embed_from_cache(paths, x) else: assert all(sr == x.sample_rate[0] for sr in x.sample_rate), "All sample rates in batch should be equal." chroma = self._compute_wav_embedding(x.wav, x.sample_rate[0]) if self.match_len_on_eval: B, T, C = chroma.shape if T > self.chroma_len: chroma = chroma[:, :self.chroma_len] logger.debug(f"Chroma was truncated to match length! ({T} -> {chroma.shape[1]})") elif T < self.chroma_len: n_repeat = int(math.ceil(self.chroma_len / T)) chroma = chroma.repeat(1, n_repeat, 1) chroma = chroma[:, :self.chroma_len] logger.debug(f"Chroma was repeated to match length! ({T} -> {chroma.shape[1]})") return chroma def tokenize(self, x: WavCondition) -> WavCondition: """Apply WavConditioner tokenization and populate cache if needed.""" x = super().tokenize(x) no_undefined_paths = all(p is not None for p in x.path) if self.cache is not None and no_undefined_paths: paths = [Path(p) for p in x.path if p is not None] self.cache.populate_embed_cache(paths, x) return x class ChromaChordConditioner(ChromaStemConditioner): """Chord Chroma conditioner based on stems. The ChromaChordConditioner uses DEMUCS to first filter out drums and bass, as the drums and bass often dominate the chroma leading to the chroma features not containing information about the melody. Args: output_dim (int): Output dimension for the conditioner. sample_rate (int): Sample rate for the chroma extractor. n_chroma (int): Number of chroma bins for the chroma extractor. radix2_exp (int): Size of stft window for the chroma extractor (power of 2, e.g. 12 -> 2^12). duration (int): duration used during training. This is later used for correct padding in case we are using chroma as prefix. match_len_on_eval (bool, optional): if True then all chromas are padded to the training duration. Defaults to False. eval_wavs (str, optional): path to a dataset manifest with waveform, this waveforms are used as conditions during eval (for cases where we don't want to leak test conditions like MusicCaps). Defaults to None. n_eval_wavs (int, optional): limits the number of waveforms used for conditioning. Defaults to 0. device (tp.Union[torch.device, str], optional): Device for the conditioner. **kwargs: Additional parameters for the chroma extractor. 
""" def __init__(self, output_dim: int, sample_rate: int, n_chroma: int, radix2_exp: int, duration: float, match_len_on_eval: bool = True, eval_wavs: tp.Optional[str] = None, n_eval_wavs: int = 0, cache_path: tp.Optional[tp.Union[str, Path]] = None, device: tp.Union[torch.device, str] = 'cpu', **kwargs): super().__init__(output_dim = output_dim, sample_rate = sample_rate, n_chroma = n_chroma, radix2_exp = radix2_exp, duration = duration, match_len_on_eval = match_len_on_eval, eval_wavs = eval_wavs, n_eval_wavs = n_eval_wavs, cache_path = cache_path, device = device) self.winhop = self.chroma.winhop self.__dict__['demucs'] = pretrained.get_model('htdemucs').to(device) stem_sources: list = self.demucs.sources self.stem_indices = torch.LongTensor([stem_sources.index('vocals'), stem_sources.index('bass'), stem_sources.index('other')]).to(device) self.chroma_len = self._get_chroma_len() self.bar2chromabin = self.sample_rate / self.winhop self.chroma = ChordExtractor(device = device, sample_rate=sample_rate, n_chroma=n_chroma, max_duration = duration, chroma_len = self.chroma_len, winhop = self.winhop).to(device) self.chords = chords.Chords() self.chroma_coefficient = 1 self.continuation_count = 0 # for infinite generation with text chroma #3 Layered MLP projection override ''' self.output_proj = nn.Sequential( nn.Linear(n_chroma, 128), nn.ReLU(), nn.Linear(128, 256), nn.ReLU(), nn.Linear(256, output_dim) ) ''' def _downsampling_factor(self) -> int: return self.winhop def _load_eval_wavs(self, path: tp.Optional[str], num_samples: int) -> tp.Optional[torch.Tensor]: """Load pre-defined waveforms from a json. These waveforms will be used for chroma extraction during evaluation. This is done to make the evaluation on MusicCaps fair (we shouldn't see the chromas of MusicCaps). """ if path is None: return None logger.info(f"Loading evaluation wavs from {path}") dataset: AudioDataset = AudioDataset.from_meta( path, segment_duration=self.duration, min_audio_duration=self.duration, sample_rate=self.sample_rate, channels=1) if len(dataset) > 0: eval_wavs = dataset.collater([dataset[i] for i in range(num_samples)]).to(self.device) logger.info(f"Using {len(eval_wavs)} evaluation wavs for chroma-stem conditioner") return eval_wavs else: raise ValueError("Could not find evaluation wavs, check lengths of wavs") def reset_eval_wavs(self, eval_wavs: tp.Optional[torch.Tensor]) -> None: self.eval_wavs = eval_wavs def has_eval_wavs(self) -> bool: return self.eval_wavs is not None def _sample_eval_wavs(self, num_samples: int) -> torch.Tensor: """Sample wavs from a predefined list.""" assert self.eval_wavs is not None, "Cannot sample eval wavs as no eval wavs provided." 
total_eval_wavs = len(self.eval_wavs) out = self.eval_wavs if num_samples > total_eval_wavs: out = self.eval_wavs.repeat(num_samples // total_eval_wavs + 1, 1, 1) return out[torch.randperm(len(out))][:num_samples] def _get_chroma_len(self) -> int: """Get length of chroma during training.""" dummy_wav = torch.zeros((1, int(self.sample_rate * self.duration)), device=self.device) dummy_chr = self.chroma(dummy_wav) return dummy_chr.shape[1] @torch.no_grad() def _get_stemmed_wav(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: """Get parts of the wav that holds the melody, extracting the main stems from the wav.""" with self.autocast: wav = convert_audio( wav, sample_rate, self.demucs.samplerate, self.demucs.audio_channels) # type: ignore stems = apply_model(self.demucs, wav, device=self.device) stems = stems[:, self.stem_indices] # extract relevant stems for melody conditioning mix_wav = stems.sum(1) # merge extracted stems to single waveform mix_wav = convert_audio(mix_wav, self.demucs.samplerate, self.sample_rate, 1) # type: ignore return mix_wav @torch.no_grad() def _extract_chroma(self, wav: torch.Tensor) -> torch.Tensor: """Extract chroma features from the waveform.""" with self.autocast: return self.chroma(wav) @torch.no_grad() def _compute_wav_embedding(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: """Compute wav embedding, applying stem and chroma extraction.""" # avoid 0-size tensors when we are working with null conds if wav.shape[-1] == 1: # print("1515151") return self._extract_chroma(wav) stems = self._get_stemmed_wav(wav, sample_rate) chroma = self._extract_chroma(stems) # print("2727272") return chroma @torch.no_grad() def _get_full_chroma_for_cache(self, path: tp.Union[str, Path], x: WavCondition, idx: int) -> torch.Tensor: """Extract chroma from the whole audio waveform at the given path.""" wav, sr = audio_read(path) wav = wav[None].to(self.device) wav = convert_audio(wav, sr, self.sample_rate, to_channels=1) chroma = self._compute_wav_embedding(wav, self.sample_rate)[0] return chroma def _extract_chroma_chunk(self, full_chroma: torch.Tensor, x: WavCondition, idx: int) -> torch.Tensor: """Extract a chunk of chroma from the full chroma derived from the full waveform.""" wav_length = x.wav.shape[-1] seek_time = x.seek_time[idx] assert seek_time is not None, ( "WavCondition seek_time is required " "when extracting chroma chunks from pre-computed chroma.") full_chroma = full_chroma.float() frame_rate = self.sample_rate / self._downsampling_factor() target_length = int(frame_rate * wav_length / self.sample_rate) index = int(frame_rate * seek_time) out = full_chroma[index: index + target_length] out = F.pad(out[None], (0, 0, 0, target_length - out.shape[0]))[0] return out.to(self.device) def set_continuation_count(self, sub_duration_ratio, current_iter): self.continuation_count = int(self.chroma_len * sub_duration_ratio * current_iter) @torch.no_grad() def _get_wav_embedding(self, x: tp.Union[WavCondition, WavChordTextCondition]) -> torch.Tensor: """Get the wav embedding from the WavCondition. The conditioner will either extract the embedding on-the-fly computing it from the condition wav directly or will rely on the embedding cache to load the pre-computed embedding if relevant. 
""" if isinstance(x, WavCondition): sampled_wav: tp.Optional[torch.Tensor] = None if not self.training and self.eval_wavs is not None: warn_once(logger, "Using precomputed evaluation wavs!") sampled_wav = self._sample_eval_wavs(len(x.wav)) no_undefined_paths = all(p is not None for p in x.path) no_nullified_cond = x.wav.shape[-1] > 1 if sampled_wav is not None: chroma = self._compute_wav_embedding(sampled_wav, self.sample_rate) # print("111111") elif self.cache is not None and no_undefined_paths and no_nullified_cond: paths = [Path(p) for p in x.path if p is not None] chroma = self.cache.get_embed_from_cache(paths, x) # print("222222") #Works here else: assert all(sr == x.sample_rate[0] for sr in x.sample_rate), "All sample rates in batch should be equal." chroma = self._compute_wav_embedding(x.wav, x.sample_rate[0]) # print("333333") #and here in training else: chromas = [] for wav, bpm, meter in zip(x.wav, x.bpm, x.meter): chroma = torch.zeros([self.chroma_len, self.dim]) count = 0 offset = 0 stext = wav.split(" ") barsec = 60/(bpm/meter) timebin = barsec * self.bar2chromabin while count < self.chroma_len: for tokens in stext: if count >= self.chroma_len: break stoken = tokens.split(',') for token in stoken: off_timebin = timebin + offset rounded_timebin = round(off_timebin) offset = off_timebin - rounded_timebin offset = offset/len(stoken) add_step = rounded_timebin//len(stoken) mhot = self.chords.chord(token) rolled = np.roll(mhot[2], mhot[0]) for i in range(count, count + add_step): if self.continuation_count > 0: self.continuation_count -= 1 continue if count >= self.chroma_len: break chroma[i] = torch.Tensor(rolled) count += 1 chromas.append(chroma) chroma = torch.stack(chromas)*self.chroma_coefficient if self.match_len_on_eval: B, T, C = chroma.shape if T > self.chroma_len: chroma = chroma[:, :self.chroma_len] logger.debug(f"Chroma was truncated to match length! ({T} -> {chroma.shape[1]})") elif T < self.chroma_len: n_repeat = int(math.ceil(self.chroma_len / T)) chroma = chroma.repeat(1, n_repeat, 1) chroma = chroma[:, :self.chroma_len] logger.debug(f"Chroma was repeated to match length! ({T} -> {chroma.shape[1]})") return chroma def tokenize(self, x: tp.Union[WavCondition, WavChordTextCondition]) -> tp.Union[WavCondition, WavChordTextCondition]: if isinstance(x, WavCondition): wav, length, sample_rate, path, seek_time = x assert length is not None return WavCondition(wav.to(self.device), length.to(self.device), sample_rate, path, seek_time) else: wav, length, sample_rate, path, seek_time, bpm, meter = x return WavChordTextCondition(wav, length.to(self.device), sample_rate, path, seek_time, bpm, meter) def forward(self, x: WavCondition) -> ConditionType: """Extract condition embedding and mask from a waveform and its metadata. Args: x (WavCondition): Waveform condition containing raw waveform and metadata. 
Returns: ConditionType: a dense vector representing the conditioning along with its mask """ wav, lengths, *_ = x with torch.no_grad(): embeds = self._get_wav_embedding(x) #chroma embeds = embeds.to(self.output_proj.weight) # embeds = embeds * (torch.rand(embeds.shape).to(self.device) * 0.3) embeds = self.output_proj(embeds) if self.match_len_on_eval: if lengths is not None: for i in range(len(lengths)): if lengths[i] > 0 and lengths[i] < self.duration * self.sample_rate: lengths[i] = torch.Tensor([(self.duration+1) * self.sample_rate]) lengths = lengths / self._downsampling_factor() mask = length_to_mask(lengths, max_len=embeds.shape[1]).int() # type: ignore else: mask = torch.ones_like(embeds) else: if lengths is not None: lengths = lengths / self._downsampling_factor() mask = length_to_mask(lengths, max_len=embeds.shape[1]).int() # type: ignore else: mask = torch.ones_like(embeds) embeds = (embeds.to(self.device) * mask.unsqueeze(2).to(self.device)) return embeds.to(self.device), mask.to(self.device) class JointEmbeddingConditioner(BaseConditioner): """Joint embedding conditioning supporting both audio or text conditioning. Args: dim (int): Dimension. output_dim (int): Output dimension. device (str): Device. attribute (str): Attribute used by the conditioner. autocast_dtype (str): Autocast for the conditioner. quantize (bool): Whether to quantize the CLAP embedding. n_q (int): Number of residual quantizers (used if quantize is true). bins (int): Quantizers' codebooks size (used if quantize is true). kwargs: Additional parameters for residual vector quantizer. """ def __init__(self, dim: int, output_dim: int, device: str, attribute: str, autocast_dtype: tp.Optional[str] = 'float32', quantize: bool = True, n_q: int = 12, bins: int = 1024, **kwargs): super().__init__(dim=dim, output_dim=output_dim) self.device = device self.attribute = attribute if autocast_dtype is None or device == 'cpu': self.autocast = TorchAutocast(enabled=False) logger.warning("JointEmbeddingConditioner has no autocast, this might lead to NaN.") else: dtype = getattr(torch, autocast_dtype) assert isinstance(dtype, torch.dtype) logger.info(f"JointEmbeddingConditioner will be evaluated with autocast as {autocast_dtype}.") self.autocast = TorchAutocast(enabled=True, device_type=self.device, dtype=dtype) # residual vector quantizer to discretize the conditioned embedding self.quantizer: tp.Optional[ResidualVectorQuantizer] = None if quantize: self.quantizer = ResidualVectorQuantizer(dim, n_q=n_q, bins=bins, **kwargs) def _get_embed(self, x: JointEmbedCondition) -> tp.Tuple[torch.Tensor, torch.Tensor]: """Get joint embedding in latent space from the inputs. Returns: tuple[torch.Tensor, torch.Tensor]: Tensor for the latent embedding and corresponding empty indexes. 
""" raise NotImplementedError() def forward(self, x: JointEmbedCondition) -> ConditionType: with self.autocast: embed, empty_idx = self._get_embed(x) if self.quantizer is not None: embed = embed.view(-1, self.dim, 1) q_res = self.quantizer(embed, frame_rate=1) out_embed = q_res.x.view(-1, self.dim) else: out_embed = embed out_embed = self.output_proj(out_embed).view(-1, 1, self.output_dim) mask = torch.ones(*out_embed.shape[:2], device=out_embed.device) mask[empty_idx, :] = 0 # zero-out index where the input is non-existant out_embed = (out_embed * mask.unsqueeze(-1)) return out_embed, mask def tokenize(self, x: JointEmbedCondition) -> JointEmbedCondition: return x class CLAPEmbeddingConditioner(JointEmbeddingConditioner): """Joint Embedding conditioner based on pre-trained CLAP model. This CLAP-based conditioner supports a caching mechanism over the computed embeddings for faster training. Args: dim (int): Dimension. output_dim (int): Output dimension. device (str): Device. attribute (str): Attribute used by the conditioner. quantize (bool): Whether to quantize the CLAP embedding. n_q (int): Number of residual quantizers (used if quantize is true). bins (int): Quantizers' codebooks size (used if quantize is true). checkpoint (str): Path to CLAP checkpoint. model_arch (str): CLAP model architecture. enable_fusion (bool): Enable fusion for CLAP model. sample_rate (int): Sample rate used by CLAP model. max_audio_length (float): Maximum audio length for CLAP model. audio_stride (float): Stride to use for getting a CLAP embedding on the full sequence. normalize (bool): Whether to normalize the CLAP embedding. text_p (float): Probability of using text representation instead of audio at train time. batch_size (Optional[int]): Batch size for CLAP embedding computation. autocast_dtype (str): Autocast for the conditioner. cache_path (Optional[str]): Path for pre-computed embeddings caching. kwargs: Additional parameters for residual vector quantizer. """ def __init__(self, dim: int, output_dim: int, device: str, attribute: str, quantize: bool, n_q: int, bins: int, checkpoint: tp.Union[str, Path], model_arch: str, enable_fusion: bool, sample_rate: int, max_audio_length: int, audio_stride: int, normalize: bool, text_p: bool, batch_size: tp.Optional[int] = None, autocast_dtype: tp.Optional[str] = 'float32', cache_path: tp.Optional[str] = None, **kwargs): try: except ImportError: raise ImportError("Please install CLAP to use the CLAPEmbeddingConditioner: 'pip install laion_clap'") warnings.warn("Sample rate for CLAP conditioner was fixed in version v1.1.0, (from 44.1 to 48 kHz). " "Please retrain all models.")
checkpoint = AudioCraftEnvironment.resolve_reference_path(checkpoint)
7
2023-10-09 09:55:24+00:00
16k
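Note on the conditioners.py record above: the ChromaChordConditioner's `_get_wav_embedding` branch for `WavChordTextCondition` spreads bar-aligned chord tokens (parsed from text, paced by `bpm` and `meter`) over a fixed-length chroma timeline. Below is a minimal, self-contained sketch of that mapping only; the `CHORD_TO_PITCHES` table and the frame rate in the demo are illustrative assumptions, not the record's actual `chords.Chords()` helper or hop size.

import torch

# Hypothetical stand-in for the record's chords.Chords() helper:
# chord token -> active pitch classes (12-bin chroma template).
CHORD_TO_PITCHES = {
    "N": [],              # "no chord"
    "C": [0, 4, 7],       # C major triad
    "G:min": [7, 10, 2],  # G minor triad
    "F": [5, 9, 0],       # F major triad
}

def chord_text_to_chroma(text, bpm, meter, chroma_len, frames_per_sec):
    """Spread bar-aligned chord tokens over a [chroma_len, 12] chroma timeline.

    Each comma-separated group is one bar; tokens inside a bar share its frames evenly.
    """
    chroma = torch.zeros(chroma_len, 12)
    bar_sec = 60.0 / (bpm / meter)          # seconds per bar
    bar_frames = bar_sec * frames_per_sec   # chroma frames per bar
    pos = 0
    for bar in text.split(","):
        tokens = bar.split()
        if not tokens:
            continue
        step = max(1, round(bar_frames / len(tokens)))
        for tok in tokens:
            template = torch.zeros(12)
            for pitch in CHORD_TO_PITCHES.get(tok, []):
                template[pitch] = 1.0
            end = min(pos + step, chroma_len)
            chroma[pos:end] = template      # broadcast the 12-bin template over the frames
            pos = end
            if pos >= chroma_len:
                return chroma
    return chroma

if __name__ == "__main__":
    # ~10 s of conditioning at an assumed ~46.4 chroma frames per second.
    c = chord_text_to_chroma("C G:min, F C", bpm=120, meter=4,
                             chroma_len=464, frames_per_sec=46.4)
    print(c.shape)           # torch.Size([464, 12])
    print(c.sum(dim=1)[:3])  # 3 active pitch classes per frame for triads

This mirrors the record's per-token `add_step` logic; the record additionally repeats the token sequence until the timeline is full and carries a fractional-frame `offset`, whereas this sketch simply leaves trailing frames at zero.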
Texaser/MTN
nerf/network_grid_tcnn.py
[ { "identifier": "trunc_exp", "path": "activation.py", "snippet": "class _trunc_exp(Function):\n def forward(ctx, x):\n def backward(ctx, g):\ndef biased_softplus(x, bias=0):" }, { "identifier": "NeRFRenderer", "path": "nerf/renderer.py", "snippet": "class NeRFRenderer(nn.Module):\n def __init__(self, opt):\n super().__init__()\n\n self.opt = opt\n self.bound = opt.bound\n self.cascade = 1 + math.ceil(math.log2(opt.bound))\n self.grid_size = 128\n self.max_level = None\n self.dmtet = opt.dmtet\n self.cuda_ray = opt.cuda_ray\n self.taichi_ray = opt.taichi_ray\n self.min_near = opt.min_near\n self.density_thresh = opt.density_thresh\n self.train_step = 0\n self.max_train_step = 6000\n # prepare aabb with a 6D tensor (xmin, ymin, zmin, xmax, ymax, zmax)\n # NOTE: aabb (can be rectangular) is only used to generate points, we still rely on bound (always cubic) to calculate density grid and hashing.\n aabb_train = torch.FloatTensor([-opt.bound, -opt.bound, -opt.bound, opt.bound, opt.bound, opt.bound])\n aabb_infer = aabb_train.clone()\n self.register_buffer('aabb_train', aabb_train)\n self.register_buffer('aabb_infer', aabb_infer)\n\n self.glctx = None\n\n # extra state for cuda raymarching\n if self.cuda_ray:\n # density grid\n density_grid = torch.zeros([self.cascade, self.grid_size ** 3]) # [CAS, H * H * H]\n density_bitfield = torch.zeros(self.cascade * self.grid_size ** 3 // 8, dtype=torch.uint8) # [CAS * H * H * H // 8]\n self.register_buffer('density_grid', density_grid)\n self.register_buffer('density_bitfield', density_bitfield)\n self.mean_density = 0\n self.iter_density = 0\n \n if self.opt.dmtet:\n # load dmtet vertices\n tets = np.load('tets/{}_tets.npz'.format(self.opt.tet_grid_size))\n self.verts = - torch.tensor(tets['vertices'], dtype=torch.float32, device='cuda') * 2 # covers [-1, 1]\n self.indices = torch.tensor(tets['indices'], dtype=torch.long, device='cuda')\n self.tet_scale = torch.tensor([1, 1, 1], dtype=torch.float32, device='cuda')\n self.dmtet = DMTet('cuda')\n\n # vert sdf and deform\n sdf = torch.nn.Parameter(torch.zeros_like(self.verts[..., 0]), requires_grad=True)\n self.register_parameter('sdf', sdf)\n deform = torch.nn.Parameter(torch.zeros_like(self.verts), requires_grad=True)\n self.register_parameter('deform', deform)\n\n edges = torch.tensor([0,1, 0,2, 0,3, 1,2, 1,3, 2,3], dtype=torch.long, device=\"cuda\") # six edges for each tetrahedron.\n all_edges = self.indices[:,edges].reshape(-1,2) # [M * 6, 2]\n all_edges_sorted = torch.sort(all_edges, dim=1)[0]\n self.all_edges = torch.unique(all_edges_sorted, dim=0)\n\n if self.opt.h <= 2048 and self.opt.w <= 2048:\n self.glctx = dr.RasterizeCudaContext()\n else:\n self.glctx = dr.RasterizeGLContext()\n \n if self.taichi_ray:\n from einops import rearrange\n from taichi_modules import RayMarcherTaichi\n from taichi_modules import VolumeRendererTaichi\n from taichi_modules import RayAABBIntersector as RayAABBIntersectorTaichi\n from taichi_modules import raymarching_test as raymarching_test_taichi\n from taichi_modules import composite_test as composite_test_fw\n from taichi_modules import packbits as packbits_taichi\n self.rearrange = rearrange\n self.packbits_taichi = packbits_taichi\n self.ray_aabb_intersector = RayAABBIntersectorTaichi\n self.raymarching_test_taichi = raymarching_test_taichi\n self.composite_test_fw = composite_test_fw\n self.ray_marching = RayMarcherTaichi(batch_size=4096) # TODO: hard encoded batch size\n self.volume_render = VolumeRendererTaichi(batch_size=4096) # TODO: hard encoded batch 
size\n # density grid\n density_grid = torch.zeros([self.cascade, self.grid_size ** 3]) # [CAS, H * H * H]\n density_bitfield = torch.zeros(self.cascade * self.grid_size ** 3 // 8, dtype=torch.uint8) # [CAS * H * H * H // 8]\n self.register_buffer('density_grid', density_grid)\n self.register_buffer('density_bitfield', density_bitfield)\n self.mean_density = 0\n self.iter_density = 0\n \n @torch.no_grad()\n def density_blob(self, x):\n # x: [B, N, 3]\n \n d = (x ** 2).sum(-1)\n \n if self.opt.density_activation == 'exp':\n g = self.opt.blob_density * torch.exp(- d / (2 * self.opt.blob_radius ** 2))\n else:\n g = self.opt.blob_density * (1 - torch.sqrt(d) / self.opt.blob_radius)\n\n return g\n \n def forward(self, x, d):\n raise NotImplementedError()\n\n def density(self, x):\n raise NotImplementedError()\n\n def reset_extra_state(self):\n if not (self.cuda_ray or self.taichi_ray):\n return \n # density grid\n self.density_grid.zero_()\n self.mean_density = 0\n self.iter_density = 0\n\n @torch.no_grad()\n def export_mesh(self, path, resolution=None, decimate_target=-1, S=128):\n\n if self.opt.dmtet:\n\n sdf = self.sdf\n deform = torch.tanh(self.deform) / self.opt.tet_grid_size\n\n vertices, triangles = self.dmtet(self.verts + deform, sdf, self.indices)\n\n vertices = vertices.detach().cpu().numpy()\n triangles = triangles.detach().cpu().numpy()\n\n else:\n\n if resolution is None:\n resolution = self.grid_size\n\n if self.cuda_ray:\n density_thresh = min(self.mean_density, self.density_thresh) \\\n if np.greater(self.mean_density, 0) else self.density_thresh\n else:\n density_thresh = self.density_thresh\n \n # TODO: use a larger thresh to extract a surface mesh from the density field, but this value is very empirical...\n if self.opt.density_activation == 'softplus':\n density_thresh = density_thresh * 25\n \n sigmas = np.zeros([resolution, resolution, resolution], dtype=np.float32)\n\n # query\n X = torch.linspace(-1, 1, resolution).split(S)\n Y = torch.linspace(-1, 1, resolution).split(S)\n Z = torch.linspace(-1, 1, resolution).split(S)\n\n for xi, xs in enumerate(X):\n for yi, ys in enumerate(Y):\n for zi, zs in enumerate(Z):\n xx, yy, zz = custom_meshgrid(xs, ys, zs)\n pts = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) # [S, 3]\n val = self.density(pts.to(self.aabb_train.device))\n sigmas[xi * S: xi * S + len(xs), yi * S: yi * S + len(ys), zi * S: zi * S + len(zs)] = val['sigma'].reshape(len(xs), len(ys), len(zs)).detach().cpu().numpy() # [S, 1] --> [x, y, z]\n\n print(f'[INFO] marching cubes thresh: {density_thresh} ({sigmas.min()} ~ {sigmas.max()})')\n\n vertices, triangles = mcubes.marching_cubes(sigmas, density_thresh)\n vertices = vertices / (resolution - 1.0) * 2 - 1\n\n # clean\n vertices = vertices.astype(np.float32)\n triangles = triangles.astype(np.int32)\n vertices, triangles = clean_mesh(vertices, triangles, remesh=True, remesh_size=0.01)\n \n # decimation\n if decimate_target > 0 and triangles.shape[0] > decimate_target:\n vertices, triangles = decimate_mesh(vertices, triangles, decimate_target)\n\n v = torch.from_numpy(vertices).contiguous().float().to(self.aabb_train.device)\n f = torch.from_numpy(triangles).contiguous().int().to(self.aabb_train.device)\n\n # mesh = trimesh.Trimesh(vertices, triangles, process=False) # important, process=True leads to seg fault...\n # mesh.export(os.path.join(path, f'mesh.ply'))\n\n def _export(v, f, h0=2048, w0=2048, ssaa=1, name=''):\n # v, f: torch Tensor\n device = v.device\n v_np = v.cpu().numpy() # 
[N, 3]\n f_np = f.cpu().numpy() # [M, 3]\n\n print(f'[INFO] running xatlas to unwrap UVs for mesh: v={v_np.shape} f={f_np.shape}')\n\n # unwrap uvs\n import xatlas\n import nvdiffrast.torch as dr\n from sklearn.neighbors import NearestNeighbors\n from scipy.ndimage import binary_dilation, binary_erosion\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(v_np, f_np)\n chart_options = xatlas.ChartOptions()\n chart_options.max_iterations = 4 # for faster unwrap...\n atlas.generate(chart_options=chart_options)\n vmapping, ft_np, vt_np = atlas[0] # [N], [M, 3], [N, 2]\n\n # vmapping, ft_np, vt_np = xatlas.parametrize(v_np, f_np) # [N], [M, 3], [N, 2]\n\n vt = torch.from_numpy(vt_np.astype(np.float32)).float().to(device)\n ft = torch.from_numpy(ft_np.astype(np.int64)).int().to(device)\n\n # render uv maps\n uv = vt * 2.0 - 1.0 # uvs to range [-1, 1]\n uv = torch.cat((uv, torch.zeros_like(uv[..., :1]), torch.ones_like(uv[..., :1])), dim=-1) # [N, 4]\n\n if ssaa > 1:\n h = int(h0 * ssaa)\n w = int(w0 * ssaa)\n else:\n h, w = h0, w0\n \n if self.glctx is None:\n if h <= 2048 and w <= 2048:\n self.glctx = dr.RasterizeCudaContext()\n else:\n self.glctx = dr.RasterizeGLContext()\n\n rast, _ = dr.rasterize(self.glctx, uv.unsqueeze(0), ft, (h, w)) # [1, h, w, 4]\n xyzs, _ = dr.interpolate(v.unsqueeze(0), rast, f) # [1, h, w, 3]\n mask, _ = dr.interpolate(torch.ones_like(v[:, :1]).unsqueeze(0), rast, f) # [1, h, w, 1]\n\n # masked query \n xyzs = xyzs.view(-1, 3)\n mask = (mask > 0).view(-1)\n \n feats = torch.zeros(h * w, 3, device=device, dtype=torch.float32)\n\n if mask.any():\n xyzs = xyzs[mask] # [M, 3]\n\n # batched inference to avoid OOM\n all_feats = []\n head = 0\n while head < xyzs.shape[0]:\n tail = min(head + 640000, xyzs.shape[0])\n results_ = self.density(xyzs[head:tail])\n all_feats.append(results_['albedo'].float())\n head += 640000\n\n feats[mask] = torch.cat(all_feats, dim=0)\n \n feats = feats.view(h, w, -1)\n mask = mask.view(h, w)\n\n # quantize [0.0, 1.0] to [0, 255]\n feats = feats.cpu().numpy()\n feats = (feats * 255).astype(np.uint8)\n\n ### NN search as an antialiasing ...\n mask = mask.cpu().numpy()\n\n inpaint_region = binary_dilation(mask, iterations=3)\n inpaint_region[mask] = 0\n\n search_region = mask.copy()\n not_search_region = binary_erosion(search_region, iterations=2)\n search_region[not_search_region] = 0\n\n search_coords = np.stack(np.nonzero(search_region), axis=-1)\n inpaint_coords = np.stack(np.nonzero(inpaint_region), axis=-1)\n\n knn = NearestNeighbors(n_neighbors=1, algorithm='kd_tree').fit(search_coords)\n _, indices = knn.kneighbors(inpaint_coords)\n\n feats[tuple(inpaint_coords.T)] = feats[tuple(search_coords[indices[:, 0]].T)]\n\n feats = cv2.cvtColor(feats, cv2.COLOR_RGB2BGR)\n\n # do ssaa after the NN search, in numpy\n if ssaa > 1:\n feats = cv2.resize(feats, (w0, h0), interpolation=cv2.INTER_LINEAR)\n\n cv2.imwrite(os.path.join(path, f'{name}albedo.png'), feats)\n\n # save obj (v, vt, f /)\n obj_file = os.path.join(path, f'{name}mesh.obj')\n mtl_file = os.path.join(path, f'{name}mesh.mtl')\n\n print(f'[INFO] writing obj mesh to {obj_file}')\n with open(obj_file, \"w\") as fp:\n fp.write(f'mtllib {name}mesh.mtl \\n')\n \n print(f'[INFO] writing vertices {v_np.shape}')\n for v in v_np:\n fp.write(f'v {v[0]} {v[1]} {v[2]} \\n')\n \n print(f'[INFO] writing vertices texture coords {vt_np.shape}')\n for v in vt_np:\n fp.write(f'vt {v[0]} {1 - v[1]} \\n') \n\n print(f'[INFO] writing faces {f_np.shape}')\n fp.write(f'usemtl mat0 \\n')\n for i in range(len(f_np)):\n 
fp.write(f\"f {f_np[i, 0] + 1}/{ft_np[i, 0] + 1} {f_np[i, 1] + 1}/{ft_np[i, 1] + 1} {f_np[i, 2] + 1}/{ft_np[i, 2] + 1} \\n\")\n\n with open(mtl_file, \"w\") as fp:\n fp.write(f'newmtl mat0 \\n')\n fp.write(f'Ka 1.000000 1.000000 1.000000 \\n')\n fp.write(f'Kd 1.000000 1.000000 1.000000 \\n')\n fp.write(f'Ks 0.000000 0.000000 0.000000 \\n')\n fp.write(f'Tr 1.000000 \\n')\n fp.write(f'illum 1 \\n')\n fp.write(f'Ns 0.000000 \\n')\n fp.write(f'map_Kd {name}albedo.png \\n')\n\n _export(v, f)\n\n def run(self, rays_o, rays_d, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, perturb=False, **kwargs):\n # rays_o, rays_d: [B, N, 3]\n # bg_color: [BN, 3] in range [0, 1]\n # return: image: [B, N, 3], depth: [B, N]\n\n prefix = rays_o.shape[:-1]\n rays_o = rays_o.contiguous().view(-1, 3)\n rays_d = rays_d.contiguous().view(-1, 3)\n\n N = rays_o.shape[0] # N = B * N, in fact\n device = rays_o.device\n\n results = {}\n\n # choose aabb\n aabb = self.aabb_train if self.training else self.aabb_infer\n\n # sample steps\n # nears, fars = raymarching.near_far_from_aabb(rays_o, rays_d, aabb, self.min_near)\n # nears.unsqueeze_(-1)\n # fars.unsqueeze_(-1)\n nears, fars = near_far_from_bound(rays_o, rays_d, self.bound, type='sphere', min_near=self.min_near)\n\n # random sample light_d if not provided\n if light_d is None:\n # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face)\n light_d = safe_normalize(rays_o + torch.randn(3, device=rays_o.device)) # [N, 3]\n\n #print(f'nears = {nears.min().item()} ~ {nears.max().item()}, fars = {fars.min().item()} ~ {fars.max().item()}')\n\n z_vals = torch.linspace(0.0, 1.0, self.opt.num_steps, device=device).unsqueeze(0) # [1, T]\n z_vals = z_vals.expand((N, self.opt.num_steps)) # [N, T]\n z_vals = nears + (fars - nears) * z_vals # [N, T], in [nears, fars]\n\n # perturb z_vals\n sample_dist = (fars - nears) / self.opt.num_steps\n if perturb:\n z_vals = z_vals + (torch.rand(z_vals.shape, device=device) - 0.5) * sample_dist\n #z_vals = z_vals.clamp(nears, fars) # avoid out of bounds xyzs.\n\n # generate xyzs\n xyzs = rays_o.unsqueeze(-2) + rays_d.unsqueeze(-2) * z_vals.unsqueeze(-1) # [N, 1, 3] * [N, T, 1] -> [N, T, 3]\n xyzs = torch.min(torch.max(xyzs, aabb[:3]), aabb[3:]) # a manual clip.\n\n #plot_pointcloud(xyzs.reshape(-1, 3).detach().cpu().numpy())\n\n # query SDF and RGB\n density_outputs = self.density(xyzs.reshape(-1, 3))\n\n #sigmas = density_outputs['sigma'].view(N, self.opt.num_steps) # [N, T]\n for k, v in density_outputs.items():\n density_outputs[k] = v.view(N, self.opt.num_steps, -1)\n\n # upsample z_vals (nerf-like)\n if self.opt.upsample_steps > 0:\n with torch.no_grad():\n\n deltas = z_vals[..., 1:] - z_vals[..., :-1] # [N, T-1]\n deltas = torch.cat([deltas, sample_dist * torch.ones_like(deltas[..., :1])], dim=-1)\n\n alphas = 1 - torch.exp(-deltas * density_outputs['sigma'].squeeze(-1)) # [N, T]\n alphas_shifted = torch.cat([torch.ones_like(alphas[..., :1]), 1 - alphas + 1e-15], dim=-1) # [N, T+1]\n weights = alphas * torch.cumprod(alphas_shifted, dim=-1)[..., :-1] # [N, T]\n\n # sample new z_vals\n z_vals_mid = (z_vals[..., :-1] + 0.5 * deltas[..., :-1]) # [N, T-1]\n new_z_vals = sample_pdf(z_vals_mid, weights[:, 1:-1], self.opt.upsample_steps, det=not self.training).detach() # [N, t]\n\n new_xyzs = rays_o.unsqueeze(-2) + rays_d.unsqueeze(-2) * new_z_vals.unsqueeze(-1) # [N, 1, 3] * [N, t, 1] -> [N, t, 3]\n new_xyzs = torch.min(torch.max(new_xyzs, aabb[:3]), aabb[3:]) # a manual clip.\n\n # only 
forward new points to save computation\n new_density_outputs = self.density(new_xyzs.reshape(-1, 3))\n #new_sigmas = new_density_outputs['sigma'].view(N, self.opt.upsample_steps) # [N, t]\n for k, v in new_density_outputs.items():\n new_density_outputs[k] = v.view(N, self.opt.upsample_steps, -1)\n\n # re-order\n z_vals = torch.cat([z_vals, new_z_vals], dim=1) # [N, T+t]\n z_vals, z_index = torch.sort(z_vals, dim=1)\n\n xyzs = torch.cat([xyzs, new_xyzs], dim=1) # [N, T+t, 3]\n xyzs = torch.gather(xyzs, dim=1, index=z_index.unsqueeze(-1).expand_as(xyzs))\n\n for k in density_outputs:\n tmp_output = torch.cat([density_outputs[k], new_density_outputs[k]], dim=1)\n density_outputs[k] = torch.gather(tmp_output, dim=1, index=z_index.unsqueeze(-1).expand_as(tmp_output))\n\n deltas = z_vals[..., 1:] - z_vals[..., :-1] # [N, T+t-1]\n deltas = torch.cat([deltas, sample_dist * torch.ones_like(deltas[..., :1])], dim=-1)\n alphas = 1 - torch.exp(-deltas * density_outputs['sigma'].squeeze(-1)) # [N, T+t]\n alphas_shifted = torch.cat([torch.ones_like(alphas[..., :1]), 1 - alphas + 1e-15], dim=-1) # [N, T+t+1]\n weights = alphas * torch.cumprod(alphas_shifted, dim=-1)[..., :-1] # [N, T+t]\n\n dirs = rays_d.view(-1, 1, 3).expand_as(xyzs)\n light_d = light_d.view(-1, 1, 3).expand_as(xyzs)\n for k, v in density_outputs.items():\n density_outputs[k] = v.view(-1, v.shape[-1])\n\n dirs = safe_normalize(dirs)\n sigmas, rgbs, normals = self(xyzs.reshape(-1, 3), dirs.reshape(-1, 3), light_d.reshape(-1, 3), ratio=ambient_ratio, shading=shading)\n rgbs = rgbs.view(N, -1, 3) # [N, T+t, 3]\n if normals is not None:\n normals = normals.view(N, -1, 3)\n\n # calculate weight_sum (mask)\n weights_sum = weights.sum(dim=-1) # [N]\n \n # calculate depth \n depth = torch.sum(weights * z_vals, dim=-1)\n\n # calculate color\n image = torch.sum(weights.unsqueeze(-1) * rgbs, dim=-2) # [N, 3], in [0, 1]\n\n # mix background color\n if bg_color is None:\n if self.opt.bg_radius > 0:\n # use the bg model to calculate bg_color\n bg_color = self.background(rays_d) # [N, 3]\n else:\n bg_color = 1\n \n image = image + (1 - weights_sum).unsqueeze(-1) * bg_color\n\n image = image.view(*prefix, 3)\n depth = depth.view(*prefix)\n weights_sum = weights_sum.reshape(*prefix)\n\n if self.training:\n if self.opt.lambda_orient > 0 and normals is not None:\n # orientation loss\n loss_orient = weights.detach() * (normals * dirs).sum(-1).clamp(min=0) ** 2\n results['loss_orient'] = loss_orient.sum(-1).mean()\n \n if self.opt.lambda_3d_normal_smooth > 0 and normals is not None:\n normals_perturb = self.normal(xyzs + torch.randn_like(xyzs) * 1e-2)\n results['loss_normal_perturb'] = (normals - normals_perturb).abs().mean()\n \n if (self.opt.lambda_2d_normal_smooth > 0 or self.opt.lambda_normal > 0) and normals is not None:\n normal_image = torch.sum(weights.unsqueeze(-1) * (normals + 1) / 2, dim=-2) # [N, 3], in [0, 1]\n results['normal_image'] = normal_image\n \n results['image'] = image\n results['depth'] = depth\n results['weights'] = weights\n results['weights_sum'] = weights_sum\n\n return results\n\n\n def run_cuda(self, rays_o, rays_d, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, perturb=False, T_thresh=1e-4, binarize=False, **kwargs):\n # rays_o, rays_d: [B, N, 3]\n # return: image: [B, N, 3], depth: [B, N]\n\n prefix = rays_o.shape[:-1]\n rays_o = rays_o.contiguous().view(-1, 3)\n rays_d = rays_d.contiguous().view(-1, 3)\n\n N = rays_o.shape[0] # B * N, in fact\n device = rays_o.device\n\n # pre-calculate near far\n nears, 
fars = raymarching.near_far_from_aabb(rays_o, rays_d, self.aabb_train if self.training else self.aabb_infer)\n\n # random sample light_d if not provided\n if light_d is None:\n # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face)\n light_d = safe_normalize(rays_o + torch.randn(3, device=rays_o.device)) # [N, 3]\n\n results = {}\n\n if self.training:\n self.train_step += 1\n # print(self.train_epoch)\n xyzs, dirs, ts, rays = raymarching.march_rays_train(rays_o, rays_d, self.bound, self.density_bitfield, self.cascade, self.grid_size, nears, fars, perturb, self.opt.dt_gamma, self.opt.max_steps)\n dirs = safe_normalize(dirs)\n\n if light_d.shape[0] > 1:\n flatten_rays = raymarching.flatten_rays(rays, xyzs.shape[0]).long()\n light_d = light_d[flatten_rays]\n\n \n sigmas, rgbs, normals = self(xyzs, dirs, light_d, ratio=ambient_ratio, shading=shading)\n weights, weights_sum, depth, image = raymarching.composite_rays_train(sigmas, rgbs, ts, rays, T_thresh, binarize)\n \n # normals related regularizations\n if self.opt.lambda_orient > 0 and normals is not None:\n # orientation loss \n loss_orient = weights.detach() * (normals * dirs).sum(-1).clamp(min=0) ** 2\n results['loss_orient'] = loss_orient.mean()\n \n if self.opt.lambda_3d_normal_smooth > 0 and normals is not None:\n normals_perturb = self.normal(xyzs + torch.randn_like(xyzs) * 1e-2)\n results['loss_normal_perturb'] = (normals - normals_perturb).abs().mean()\n \n if (self.opt.lambda_2d_normal_smooth > 0 or self.opt.lambda_normal > 0) and normals is not None:\n _, _, _, normal_image = raymarching.composite_rays_train(sigmas.detach(), (normals + 1) / 2, ts, rays, T_thresh, binarize)\n results['normal_image'] = normal_image\n \n # weights normalization\n results['weights'] = weights\n\n else:\n \n # allocate outputs \n dtype = torch.float32\n \n weights_sum = torch.zeros(N, dtype=dtype, device=device)\n depth = torch.zeros(N, dtype=dtype, device=device)\n image = torch.zeros(N, 3, dtype=dtype, device=device)\n \n n_alive = N\n rays_alive = torch.arange(n_alive, dtype=torch.int32, device=device) # [N]\n rays_t = nears.clone() # [N]\n\n step = 0\n \n while step < self.opt.max_steps: # hard coded max step\n\n # count alive rays \n n_alive = rays_alive.shape[0]\n\n # exit loop\n if n_alive <= 0:\n break\n\n # decide compact_steps\n n_step = max(min(N // n_alive, 8), 1)\n\n xyzs, dirs, ts = raymarching.march_rays(n_alive, n_step, rays_alive, rays_t, rays_o, rays_d, self.bound, self.density_bitfield, self.cascade, self.grid_size, nears, fars, perturb if step == 0 else False, self.opt.dt_gamma, self.opt.max_steps)\n dirs = safe_normalize(dirs)\n sigmas, rgbs, normals = self(xyzs, dirs, light_d, ratio=ambient_ratio, shading=shading)\n raymarching.composite_rays(n_alive, n_step, rays_alive, rays_t, sigmas, rgbs, ts, weights_sum, depth, image, T_thresh, binarize)\n\n rays_alive = rays_alive[rays_alive >= 0]\n #print(f'step = {step}, n_step = {n_step}, n_alive = {n_alive}, xyzs: {xyzs.shape}')\n\n step += n_step\n\n # mix background color\n if bg_color is None:\n if self.opt.bg_radius > 0:\n # use the bg model to calculate bg_color\n bg_color = self.background(rays_d) # [N, 3]\n else:\n bg_color = 1\n # bg_color = 1\n # bg_color = 1e-3\n if shading == 'normal':\n bg_color = 1\n image = image + (1 - weights_sum).unsqueeze(-1) * bg_color\n\n image = image.view(*prefix, 3)\n depth = depth.view(*prefix)\n\n weights_sum = weights_sum.reshape(*prefix)\n\n results['image'] = image\n results['depth'] = depth\n 
results['weights_sum'] = weights_sum\n \n return results\n\n @torch.no_grad()\n def init_tet(self, mesh=None):\n\n if mesh is not None:\n # normalize mesh\n scale = 0.8 / np.array(mesh.bounds[1] - mesh.bounds[0]).max()\n center = np.array(mesh.bounds[1] + mesh.bounds[0]) / 2\n mesh.vertices = (mesh.vertices - center) * scale\n\n # init scale\n # self.tet_scale = torch.from_numpy(np.abs(mesh.vertices).max(axis=0) + 1e-1).to(self.verts.dtype).cuda()\n self.tet_scale = torch.from_numpy(np.array([np.abs(mesh.vertices).max()]) + 1e-1).to(self.verts.dtype).cuda()\n self.verts = self.verts * self.tet_scale\n\n # init sdf\n import cubvh\n BVH = cubvh.cuBVH(mesh.vertices, mesh.faces)\n sdf, _, _ = BVH.signed_distance(self.verts, return_uvw=False, mode='watertight')\n sdf *= -10 # INNER is POSITIVE, also make it stronger\n self.sdf.data += sdf.to(self.sdf.data.dtype).clamp(-1, 1)\n\n else:\n\n if self.cuda_ray:\n density_thresh = min(self.mean_density, self.density_thresh)\n else:\n density_thresh = self.density_thresh\n \n if self.opt.density_activation == 'softplus':\n density_thresh = density_thresh * 25\n\n # init scale\n sigma = self.density(self.verts)['sigma'] # verts covers [-1, 1] now\n mask = sigma > density_thresh\n valid_verts = self.verts[mask]\n self.tet_scale = valid_verts.abs().amax(dim=0) + 1e-1\n self.verts = self.verts * self.tet_scale\n\n # init sigma\n sigma = self.density(self.verts)['sigma'] # new verts\n self.sdf.data += (sigma - density_thresh).clamp(-1, 1)\n\n print(f'[INFO] init dmtet: scale = {self.tet_scale}')\n\n\n def run_dmtet(self, rays_o, rays_d, mvp, h, w, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, **kwargs):\n # mvp: [B, 4, 4]\n\n device = mvp.device\n campos = rays_o[:, 0, :] # only need one ray per batch\n\n # random sample light_d if not provided\n if light_d is None:\n # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face)\n light_d = safe_normalize(campos + torch.randn_like(campos)).view(-1, 1, 1, 3) # [B, 1, 1, 3]\n\n results = {}\n\n # get mesh\n sdf = self.sdf\n deform = torch.tanh(self.deform) / self.opt.tet_grid_size\n\n verts, faces = self.dmtet(self.verts + deform, sdf, self.indices)\n\n # get normals\n i0, i1, i2 = faces[:, 0], faces[:, 1], faces[:, 2]\n v0, v1, v2 = verts[i0, :], verts[i1, :], verts[i2, :]\n\n faces = faces.int()\n \n face_normals = torch.cross(v1 - v0, v2 - v0)\n face_normals = safe_normalize(face_normals)\n \n vn = torch.zeros_like(verts)\n vn.scatter_add_(0, i0[:, None].repeat(1,3), face_normals)\n vn.scatter_add_(0, i1[:, None].repeat(1,3), face_normals)\n vn.scatter_add_(0, i2[:, None].repeat(1,3), face_normals)\n\n vn = torch.where(torch.sum(vn * vn, -1, keepdim=True) > 1e-20, vn, torch.tensor([0.0, 0.0, 1.0], dtype=torch.float32, device=vn.device))\n\n # rasterization\n verts_clip = torch.bmm(F.pad(verts, pad=(0, 1), mode='constant', value=1.0).unsqueeze(0).repeat(mvp.shape[0], 1, 1), \n mvp.permute(0,2,1)).float() # [B, N, 4]\n rast, rast_db = dr.rasterize(self.glctx, verts_clip, faces, (h, w))\n \n alpha = (rast[..., 3:] > 0).float()\n xyzs, _ = dr.interpolate(verts.unsqueeze(0), rast, faces) # [B, H, W, 3]\n normal, _ = dr.interpolate(vn.unsqueeze(0).contiguous(), rast, faces)\n normal = safe_normalize(normal)\n\n xyzs = xyzs.view(-1, 3)\n mask = (rast[..., 3:] > 0).view(-1).detach()\n\n # do the lighting here since we have normal from mesh now.\n albedo = torch.zeros_like(xyzs, dtype=torch.float32)\n if mask.any():\n masked_albedo = 
self.density(xyzs[mask])['albedo']\n albedo[mask] = masked_albedo.float()\n albedo = albedo.view(-1, h, w, 3)\n\n # these two modes lead to no parameters to optimize if using --lock_geo.\n if self.opt.lock_geo and shading in ['textureless', 'normal']:\n shading = 'lambertian'\n\n if shading == 'albedo':\n color = albedo\n elif shading == 'textureless':\n lambertian = ambient_ratio + (1 - ambient_ratio) * (normal * light_d).sum(-1).float().clamp(min=0)\n color = lambertian.unsqueeze(-1).repeat(1, 1, 1, 3)\n elif shading == 'normal':\n color = (normal + 1) / 2\n else: # 'lambertian'\n lambertian = ambient_ratio + (1 - ambient_ratio) * (normal * light_d).sum(-1).float().clamp(min=0)\n color = albedo * lambertian.unsqueeze(-1)\n\n color = dr.antialias(color, rast, verts_clip, faces).clamp(0, 1) # [B, H, W, 3]\n alpha = dr.antialias(alpha, rast, verts_clip, faces).clamp(0, 1) # [B, H, W, 1]\n\n # mix background color\n if bg_color is None:\n if self.opt.bg_radius > 0:\n # use the bg model to calculate bg_color\n bg_color = self.background(rays_d) # [N, 3]\n else:\n bg_color = 1\n \n if torch.is_tensor(bg_color) and len(bg_color.shape) > 1:\n bg_color = bg_color.view(-1, h, w, 3)\n \n depth = rast[:, :, :, [2]] # [B, H, W]\n color = color + (1 - alpha) * bg_color\n\n results['depth'] = depth \n results['image'] = color\n results['weights_sum'] = alpha.squeeze(-1)\n\n if self.opt.lambda_2d_normal_smooth > 0 or self.opt.lambda_normal > 0:\n normal_image = dr.antialias((normal + 1) / 2, rast, verts_clip, faces).clamp(0, 1) # [B, H, W, 3]\n results['normal_image'] = normal_image\n \n # regularizations\n if self.training:\n if self.opt.lambda_mesh_normal > 0:\n results['normal_loss'] = normal_consistency(face_normals, faces)\n if self.opt.lambda_mesh_laplacian > 0:\n results['lap_loss'] = laplacian_smooth_loss(verts, faces)\n\n return results\n\n def run_taichi(self, rays_o, rays_d, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, perturb=False, T_thresh=1e-4, **kwargs):\n # rays_o, rays_d: [B, N, 3], assumes B == 1\n # return: image: [B, N, 3], depth: [B, N]\n\n prefix = rays_o.shape[:-1]\n rays_o = rays_o.contiguous().view(-1, 3)\n rays_d = rays_d.contiguous().view(-1, 3)\n\n N = rays_o.shape[0] # N = B * N, in fact\n device = rays_o.device\n\n # pre-calculate near far\n exp_step_factor = kwargs.get('exp_step_factor', 0.)\n MAX_SAMPLES = 1024\n NEAR_DISTANCE = 0.01\n center = torch.zeros(1, 3)\n half_size = torch.ones(1, 3)\n _, hits_t, _ = self.ray_aabb_intersector.apply(rays_o, rays_d, center, half_size, 1)\n hits_t[(hits_t[:, 0, 0] >= 0) & (hits_t[:, 0, 0] < NEAR_DISTANCE), 0, 0] = NEAR_DISTANCE\n\n # TODO: should sample different light_d for each batch... 
but taichi end doesn't have a flatten_ray implemented currently...\n # random sample light_d if not provided\n if light_d is None:\n # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face)\n light_d = (rays_o[0] + torch.randn(3, device=device, dtype=torch.float))\n light_d = safe_normalize(light_d)\n\n results = {}\n\n if self.training:\n rays_a, xyzs, dirs, deltas, ts, _ = self.ray_marching(rays_o, rays_d, hits_t[:, 0], self.density_bitfield, self.cascade, self.bound, exp_step_factor, self.grid_size, MAX_SAMPLES)\n dirs = safe_normalize(dirs)\n # plot_pointcloud(xyzs.reshape(-1, 3).detach().cpu().numpy())\n sigmas, rgbs, normals = self(xyzs, dirs, light_d, ratio=ambient_ratio, shading=shading)\n _, weights_sum, depth, image, weights = self.volume_render(sigmas, rgbs, deltas, ts, rays_a, kwargs.get('T_threshold', 1e-4))\n \n # normals related regularizations\n if self.opt.lambda_orient > 0 and normals is not None:\n # orientation loss \n loss_orient = weights.detach() * (normals * dirs).sum(-1).clamp(min=0) ** 2\n results['loss_orient'] = loss_orient.mean()\n \n if self.opt.lambda_3d_normal_smooth > 0 and normals is not None:\n normals_perturb = self.normal(xyzs + torch.randn_like(xyzs) * 1e-2)\n results['loss_normal_perturb'] = (normals - normals_perturb).abs().mean()\n \n if (self.opt.lambda_2d_normal_smooth > 0 or self.opt.lambda_normal > 0) and normals is not None:\n _, _, _, normal_image, _ = self.volume_render(sigmas.detach(), (normals + 1) / 2, deltas, ts, rays_a, kwargs.get('T_threshold', 1e-4))\n results['normal_image'] = normal_image\n \n # weights normalization\n results['weights'] = weights\n\n else:\n \n # allocate outputs \n dtype = torch.float32\n \n weights_sum = torch.zeros(N, dtype=dtype, device=device)\n depth = torch.zeros(N, dtype=dtype, device=device)\n image = torch.zeros(N, 3, dtype=dtype, device=device)\n \n n_alive = N\n rays_alive = torch.arange(n_alive, dtype=torch.int32, device=device) # [N]\n rays_t = hits_t[:, 0, 0]\n step = 0\n \n min_samples = 1 if exp_step_factor == 0 else 4\n\n while step < self.opt.max_steps: # hard coded max step\n\n # count alive rays \n n_alive = rays_alive.shape[0]\n\n # exit loop\n if n_alive <= 0:\n break\n\n # decide compact_steps\n # n_step = max(min(N // n_alive, 8), 1)\n n_step = max(min(N // n_alive, 64), min_samples)\n\n xyzs, dirs, deltas, ts, N_eff_samples = \\\n self.raymarching_test_taichi(rays_o, rays_d, hits_t[:, 0], rays_alive,\n self.density_bitfield, self.cascade,\n self.bound, exp_step_factor,\n self.grid_size, MAX_SAMPLES, n_step)\n\n xyzs = self.rearrange(xyzs, 'n1 n2 c -> (n1 n2) c')\n dirs = self.rearrange(dirs, 'n1 n2 c -> (n1 n2) c')\n dirs = safe_normalize(dirs)\n valid_mask = ~torch.all(dirs == 0, dim=1)\n if valid_mask.sum() == 0:\n break\n\n sigmas = torch.zeros(len(xyzs), device=device)\n rgbs = torch.zeros(len(xyzs), 3, device=device)\n normals = torch.zeros(len(xyzs), 3, device=device)\n\n sigmas[valid_mask], _rgbs, normals = self(xyzs[valid_mask], dirs[valid_mask], light_d, ratio=ambient_ratio, shading=shading)\n rgbs[valid_mask] = _rgbs.float()\n sigmas = self.rearrange(sigmas, '(n1 n2) -> n1 n2', n2=n_step)\n rgbs = self.rearrange(rgbs, '(n1 n2) c -> n1 n2 c', n2=n_step)\n if normals is not None:\n normals = self.rearrange(normals, '(n1 n2) c -> n1 n2 c', n2=n_step)\n\n self.composite_test_fw(sigmas, rgbs, deltas, ts, hits_t[:,0], rays_alive,\n kwargs.get('T_threshold', 1e-4), N_eff_samples,\n weights_sum, depth, image)\n\n rays_alive = rays_alive[rays_alive >= 
0]\n\n step += n_step\n\n # mix background color\n if bg_color is None:\n if self.opt.bg_radius > 0:\n # use the bg model to calculate bg_color\n bg_color = self.background(rays_d) # [N, 3]\n else:\n bg_color = 1\n\n image = image + self.rearrange(1 - weights_sum, 'n -> n 1') * bg_color\n image = image.view(*prefix, 3)\n\n depth = depth.view(*prefix)\n\n weights_sum = weights_sum.reshape(*prefix)\n\n results['image'] = image\n results['depth'] = depth\n results['weights_sum'] = weights_sum\n \n return results\n\n\n @torch.no_grad()\n def update_extra_state(self, decay=0.95, S=128):\n # call before each epoch to update extra states.\n\n if not (self.cuda_ray or self.taichi_ray):\n return \n \n ### update density grid\n tmp_grid = - torch.ones_like(self.density_grid)\n \n X = torch.arange(self.grid_size, dtype=torch.int32, device=self.aabb_train.device).split(S)\n Y = torch.arange(self.grid_size, dtype=torch.int32, device=self.aabb_train.device).split(S)\n Z = torch.arange(self.grid_size, dtype=torch.int32, device=self.aabb_train.device).split(S)\n\n for xs in X:\n for ys in Y:\n for zs in Z:\n \n # construct points\n xx, yy, zz = custom_meshgrid(xs, ys, zs)\n coords = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) # [N, 3], in [0, 128)\n indices = raymarching.morton3D(coords).long() # [N]\n xyzs = 2 * coords.float() / (self.grid_size - 1) - 1 # [N, 3] in [-1, 1]\n\n # cascading\n for cas in range(self.cascade):\n bound = min(2 ** cas, self.bound)\n half_grid_size = bound / self.grid_size\n # scale to current cascade's resolution\n cas_xyzs = xyzs * (bound - half_grid_size)\n # add noise in [-hgs, hgs]\n cas_xyzs += (torch.rand_like(cas_xyzs) * 2 - 1) * half_grid_size\n # query density\n sigmas = self.density(cas_xyzs)['sigma'].reshape(-1).detach()\n # assign \n tmp_grid[cas, indices] = sigmas\n # ema update\n valid_mask = self.density_grid >= 0\n self.density_grid[valid_mask] = torch.maximum(self.density_grid[valid_mask] * decay, tmp_grid[valid_mask])\n self.mean_density = torch.mean(self.density_grid[valid_mask]).item()\n self.iter_density += 1\n\n # convert to bitfield\n density_thresh = min(self.mean_density, self.density_thresh)\n if self.cuda_ray:\n self.density_bitfield = raymarching.packbits(self.density_grid, density_thresh, self.density_bitfield)\n elif self.taichi_ray:\n self.packbits_taichi(self.density_grid.reshape(-1).contiguous(), density_thresh, self.density_bitfield)\n\n # print(f'[density grid] min={self.density_grid.min().item():.4f}, max={self.density_grid.max().item():.4f}, mean={self.mean_density:.4f}, occ_rate={(self.density_grid > density_thresh).sum() / (128**3 * self.cascade):.3f}')\n\n\n def render(self, rays_o, rays_d, mvp, h, w, staged=False, max_ray_batch=4096, **kwargs):\n # rays_o, rays_d: [B, N, 3]\n # return: pred_rgb: [B, N, 3]\n B, N = rays_o.shape[:2]\n device = rays_o.device\n\n if self.dmtet:\n results = self.run_dmtet(rays_o, rays_d, mvp, h, w, **kwargs)\n elif self.cuda_ray:\n results = self.run_cuda(rays_o, rays_d, **kwargs)\n elif self.taichi_ray:\n results = self.run_taichi(rays_o, rays_d, **kwargs)\n else:\n if staged:\n depth = torch.empty((B, N), device=device)\n image = torch.empty((B, N, 3), device=device)\n weights_sum = torch.empty((B, N), device=device)\n\n for b in range(B):\n head = 0\n while head < N:\n tail = min(head + max_ray_batch, N)\n results_ = self.run(rays_o[b:b+1, head:tail], rays_d[b:b+1, head:tail], **kwargs)\n depth[b:b+1, head:tail] = results_['depth']\n weights_sum[b:b+1, head:tail] = 
results_['weights_sum']\n image[b:b+1, head:tail] = results_['image']\n head += max_ray_batch\n \n results = {}\n results['depth'] = depth\n results['image'] = image\n results['weights_sum'] = weights_sum\n\n else:\n results = self.run(rays_o, rays_d, **kwargs)\n\n return results" }, { "identifier": "get_encoder", "path": "encoding.py", "snippet": "def get_encoder(encoding, input_dim=3, \n multires=6, \n degree=4,\n num_levels=16, level_dim=2, base_resolution=16, log2_hashmap_size=19, desired_resolution=2048, align_corners=False, interpolation='linear',\n **kwargs):\n\n if encoding == 'None':\n return lambda x, **kwargs: x, input_dim\n \n elif encoding == 'frequency_torch':\n encoder = FreqEncoder_torch(input_dim=input_dim, max_freq_log2=multires-1, N_freqs=multires, log_sampling=True)\n\n elif encoding == 'frequency': # CUDA implementation, faster than torch.\n from freqencoder import FreqEncoder\n encoder = FreqEncoder(input_dim=input_dim, degree=multires)\n\n elif encoding == 'sphere_harmonics':\n from shencoder import SHEncoder\n encoder = SHEncoder(input_dim=input_dim, degree=degree)\n\n elif encoding == 'hashgrid':\n from gridencoder import GridEncoder\n encoder = GridEncoder(input_dim=input_dim, num_levels=num_levels, level_dim=level_dim, base_resolution=base_resolution, log2_hashmap_size=log2_hashmap_size, desired_resolution=desired_resolution, gridtype='hash', align_corners=align_corners, interpolation=interpolation)\n \n elif encoding == 'tiledgrid':\n from gridencoder import GridEncoder\n encoder = GridEncoder(input_dim=input_dim, num_levels=num_levels, level_dim=level_dim, base_resolution=base_resolution, log2_hashmap_size=log2_hashmap_size, desired_resolution=desired_resolution, gridtype='tiled', align_corners=align_corners, interpolation=interpolation)\n \n elif encoding == 'hashgrid_taichi':\n from taichi_modules.hash_encoder import HashEncoderTaichi\n encoder = HashEncoderTaichi(batch_size=4096) #TODO: hard encoded batch size\n\n elif encoding == 'multiscale_triplane':\n from gridencoder import MultiScaleTriplane\n # encoder = MiniTriplane(input_dim=input_dim)\n encoder = MultiScaleTriplane(input_dim=input_dim)\n\n elif encoding == 'multiscale_triplane_pooling':\n from gridencoder import MultiScaleTriplane_Pooling\n encoder = MultiScaleTriplane_Pooling(input_dim=input_dim)\n else:\n raise NotImplementedError('Unknown encoding mode, choose from [None, frequency, sphere_harmonics, hashgrid, tiledgrid]')\n\n return encoder, encoder.output_dim" }, { "identifier": "safe_normalize", "path": "nerf/utils.py", "snippet": "def safe_normalize(x, eps=1e-20):\n return x / torch.sqrt(torch.clamp(torch.sum(x * x, -1, keepdim=True), min=eps))" } ]
import torch import torch.nn as nn import torch.nn.functional as F import numpy as np import tinycudann as tcnn from activation import trunc_exp, biased_softplus from .renderer import NeRFRenderer from encoding import get_encoder from .utils import safe_normalize
13,761
class MLP(nn.Module): def __init__(self, dim_in, dim_out, dim_hidden, num_layers, bias=True): super().__init__() self.dim_in = dim_in self.dim_out = dim_out self.dim_hidden = dim_hidden self.num_layers = num_layers net = [] for l in range(num_layers): net.append(nn.Linear(self.dim_in if l == 0 else self.dim_hidden, self.dim_out if l == num_layers - 1 else self.dim_hidden, bias=bias)) self.net = nn.ModuleList(net) def forward(self, x): for l in range(self.num_layers): x = self.net[l](x) if l != self.num_layers - 1: x = F.relu(x, inplace=True) return x class NeRFNetwork(NeRFRenderer): def __init__(self, opt, num_layers=3, hidden_dim=64, num_layers_bg=2, hidden_dim_bg=32, ): super().__init__(opt) self.num_layers = num_layers self.hidden_dim = hidden_dim self.encoder = tcnn.Encoding( n_input_dims=3, encoding_config={ "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "interpolation": "Smoothstep", "per_level_scale": np.exp2(np.log2(2048 * self.bound / 16) / (16 - 1)), }, dtype=torch.float32, # ENHANCE: default float16 seems unstable... ) self.in_dim = self.encoder.n_output_dims # use torch MLP, as tcnn MLP doesn't impl second-order derivative self.sigma_net = MLP(self.in_dim, 4, hidden_dim, num_layers, bias=True) self.density_activation = trunc_exp if self.opt.density_activation == 'exp' else biased_softplus # background network if self.opt.bg_radius > 0: self.num_layers_bg = num_layers_bg self.hidden_dim_bg = hidden_dim_bg # use a very simple network to avoid it learning the prompt... self.encoder_bg, self.in_dim_bg = get_encoder('frequency', input_dim=3, multires=6) self.bg_net = MLP(self.in_dim_bg, 3, hidden_dim_bg, num_layers_bg, bias=True) else: self.bg_net = None def common_forward(self, x): # sigma enc = self.encoder((x + self.bound) / (2 * self.bound)).float() h = self.sigma_net(enc) sigma = self.density_activation(h[..., 0] + self.density_blob(x)) albedo = torch.sigmoid(h[..., 1:]) return sigma, albedo def normal(self, x): with torch.enable_grad(): with torch.cuda.amp.autocast(enabled=False): x.requires_grad_(True) sigma, albedo = self.common_forward(x) # query gradient normal = - torch.autograd.grad(torch.sum(sigma), x, create_graph=True)[0] # [N, 3] # normal = self.finite_difference_normal(x)
class MLP(nn.Module):
    def __init__(self, dim_in, dim_out, dim_hidden, num_layers, bias=True):
        super().__init__()
        self.dim_in = dim_in
        self.dim_out = dim_out
        self.dim_hidden = dim_hidden
        self.num_layers = num_layers

        net = []
        for l in range(num_layers):
            net.append(nn.Linear(self.dim_in if l == 0 else self.dim_hidden, self.dim_out if l == num_layers - 1 else self.dim_hidden, bias=bias))

        self.net = nn.ModuleList(net)

    def forward(self, x):
        for l in range(self.num_layers):
            x = self.net[l](x)
            if l != self.num_layers - 1:
                x = F.relu(x, inplace=True)
        return x


class NeRFNetwork(NeRFRenderer):
    def __init__(self,
                 opt,
                 num_layers=3,
                 hidden_dim=64,
                 num_layers_bg=2,
                 hidden_dim_bg=32,
                 ):
        super().__init__(opt)

        self.num_layers = num_layers
        self.hidden_dim = hidden_dim

        self.encoder = tcnn.Encoding(
            n_input_dims=3,
            encoding_config={
                "otype": "HashGrid",
                "n_levels": 16,
                "n_features_per_level": 2,
                "log2_hashmap_size": 19,
                "base_resolution": 16,
                "interpolation": "Smoothstep",
                "per_level_scale": np.exp2(np.log2(2048 * self.bound / 16) / (16 - 1)),
            },
            dtype=torch.float32,  # ENHANCE: default float16 seems unstable...
        )
        self.in_dim = self.encoder.n_output_dims

        # use torch MLP, as tcnn MLP doesn't impl second-order derivative
        self.sigma_net = MLP(self.in_dim, 4, hidden_dim, num_layers, bias=True)

        self.density_activation = trunc_exp if self.opt.density_activation == 'exp' else biased_softplus

        # background network
        if self.opt.bg_radius > 0:
            self.num_layers_bg = num_layers_bg
            self.hidden_dim_bg = hidden_dim_bg
            # use a very simple network to avoid it learning the prompt...
            self.encoder_bg, self.in_dim_bg = get_encoder('frequency', input_dim=3, multires=6)
            self.bg_net = MLP(self.in_dim_bg, 3, hidden_dim_bg, num_layers_bg, bias=True)
        else:
            self.bg_net = None

    def common_forward(self, x):
        # sigma
        enc = self.encoder((x + self.bound) / (2 * self.bound)).float()
        h = self.sigma_net(enc)

        sigma = self.density_activation(h[..., 0] + self.density_blob(x))
        albedo = torch.sigmoid(h[..., 1:])

        return sigma, albedo

    def normal(self, x):
        with torch.enable_grad():
            with torch.cuda.amp.autocast(enabled=False):
                x.requires_grad_(True)
                sigma, albedo = self.common_forward(x)
                # query gradient
                normal = - torch.autograd.grad(torch.sum(sigma), x, create_graph=True)[0] # [N, 3]

        # normal = self.finite_difference_normal(x)
normal = safe_normalize(normal)
3
2023-10-11 04:06:20+00:00
16k
oracle/guardian-ai
guardian_ai/fairness/metrics/core.py
[ { "identifier": "EqualizedOddsScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class EqualizedOddsScorer(_ModelFairnessScorer):\n \"\"\"\n Measures the disparity of a model's true positive and false positive rates\n between subgroups and the rest of the subgroups.\n\n The disparity is measured by comparing the true positive and false positive\n rates on instances of a subgroup against the rest of the subgroups.\n\n True Positive Rate (also known as TPR, recall, or sensitivity) is\n calculated as TP / (TP + FN), where TP and FN are the number of true\n positives and false negatives, respectively.\n\n False Positive Rate (also known as FPR or fall-out) is calculated as\n FP / (FP + TN), where FP and TN are the number of false positives and\n true negatives, respectively.\n\n Equalized Odds [1] is computed by taking the maximum distance between\n TPR and FPR for a subgroup against the rest of the subgroups.\n\n Perfect score\n A perfect score for this metric means that the model has the same TPR and\n FPR when comparing a subgroup to the rest of the subgroups. For example,\n if the protected attributes are race and sex, then a perfect\n Equalized Odds disparity would mean that all combinations of values for\n race and sex have identical TPR and FPR. Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n References\n ----------\n [1] `Moritz Hardt et al. \"Equality of Opportunity in Supervised Learning\".\n Advances in Neural Information Processing Systems. 2016.\n <https://arxiv.org/pdf/1610.02413.pdf>`_\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import EqualizedOddsScorer\n scorer = EqualizedOddsScorer(['race', 'sex'])\n scorer(model, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=equalized_odds,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )" }, { "identifier": "ErrorRateScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class ErrorRateScorer(_ModelFairnessScorer):\n \"\"\"\n Measures the disparity of a model's error rate between all subgroup pairs.\n\n For each subgroup, the disparity is measured by comparing the error rate on\n instances of a subgroup against the rest of the subgroups.\n\n Error Rate (also known as inaccuracy) is calculated as\n (FP + FN) / N, where FP and FN are the number of false positives and\n false negatives, respectively, while N is the total Number of\n instances.\n\n Perfect score\n A perfect score for this metric means that the model does not make more\n mistakes for any of the subgroups more often than it\n does for the rest of the subgroups. For example, if the protected\n attributes are race and sex, then a perfect error rate disparity would\n mean that all combinations of values for race and sex have identical\n error rates. Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import ErrorRateScorer\n scorer = ErrorRateScorer(['race', 'sex'])\n scorer(model, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=error_rate,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )" }, { "identifier": "FalseDiscoveryRateScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class FalseDiscoveryRateScorer(_ModelFairnessScorer):\n \"\"\"\n Measures the disparity of a model's false discovery rate between all subgroup pairs.\n\n For each subgroup, the disparity is measured by comparing the false\n discovery rate on instances of a subgroup against the rest of the\n subgroups.\n\n False Discovery Rate (also known as FDR) is calculated as\n FP / (FP + TP), where FP and TP are the number of false positives and\n true positives, respectively.\n\n Perfect score\n A perfect score for this metric means that the model does not make more\n mistakes on the positive class for any of the subgroups more often than it\n does for the rest of the subgroups. For example, if the protected\n attributes are race and sex, then a perfect false discovery rate disparity\n would mean that all combinations of values for race and sex have identical\n false discovery rates. Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import FalseDiscoveryRateScorer\n scorer = FalseDiscoveryRateScorer(['race', 'sex'])\n scorer(model, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=false_discovery_rate,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )" }, { "identifier": "FalseNegativeRateScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class FalseNegativeRateScorer(_ModelFairnessScorer):\n \"\"\"\n Measures the disparity of a model's false negative rate between all subgroup pairs.\n\n For each subgroup, the disparity is measured by comparing the false\n negative rate on instances of a subgroup against the rest of the subgroups.\n\n False Negative Rate [1] (also known as FNR or miss rate) is calculated as\n FN / (FN + TP), where FN and TP are the number of false negatives and\n true positives, respectively.\n\n Perfect score\n A perfect score for this metric means that the model does not incorrectly\n predict the negative class for any of the subgroups more often than it\n does for the rest of the subgroups. For example, if the protected\n attributes are race and sex, then a perfect false negative rate disparity\n would mean that all combinations of values for race and sex have identical\n false negative rates. Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n References\n ----------\n [1] `Alexandra Chouldechova. \"Fair Prediction with Disparate Impact: A Study\n of Bias in Recidivism Prediction Instruments\". Big Data (2016).\n <https://www.liebertpub.com/doi/10.1089/big.2016.0047>`_\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import FalseNegativeRateScorer\n scorer = FalseNegativeRateScorer(['race', 'sex'])\n scorer(model, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=false_negative_rate,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )" }, { "identifier": "FalseOmissionRateScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class FalseOmissionRateScorer(_ModelFairnessScorer):\n \"\"\"\n Measures the disparity of a model's false omission rate between all subgroup pairs.\n\n For each subgroup, the disparity is measured by comparing the false\n omission rate on instances of a subgroup against the rest of the subgroups.\n\n False Omission Rate (also known as FOR) is calculated as\n FN / (FN + TN), where FN and TN are the number of false negatives and\n true negatives, respectively.\n\n Perfect score\n A perfect score for this metric means that the model does not make more\n mistakes on the negative class for any of the subgroups more often than it\n does for the rest of the subgroups. For example, if the protected\n attributes are race and sex, then a perfect false omission rate disparity\n would mean that all combinations of values for race and sex have identical\n false omission rates. Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import FalseOmissionRateScorer\n scorer = FalseOmissionRateScorer(['race', 'sex'])\n scorer(model, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=false_omission_rate,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )" }, { "identifier": "FalsePositiveRateScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class FalsePositiveRateScorer(_ModelFairnessScorer):\n \"\"\"\n Measures the disparity of a model's false positive rate between all subgroup pairs.\n\n For each subgroup, the disparity is measured by comparing the false\n positive rate on instances of a subgroup against the rest of the subgroups.\n\n False Positive Rate [1] (also known as FPR or fall-out) is calculated as\n FP / (FP + TN), where FP and TN are the number of false positives and\n true negatives, respectively.\n\n Perfect score\n A perfect score for this metric means that the model does not incorrectly\n predict the positive class for any of the subgroups more often than it\n does for the rest of the subgroups. For example, if the protected\n attributes are race and sex, then a perfect false positive rate disparity\n would mean that all combinations of values for race and sex have identical\n false positive rates. Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n References\n ----------\n [1] `Alexandra Chouldechova. \"Fair Prediction with Disparate Impact: A Study\n of Bias in Recidivism Prediction Instruments\". Big Data (2016).\n <https://www.liebertpub.com/doi/10.1089/big.2016.0047>`_\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import FalsePositiveRateScorer\n scorer = FalsePositiveRateScorer(['race', 'sex'])\n scorer(model, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=false_positive_rate,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )" }, { "identifier": "ModelStatisticalParityScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class ModelStatisticalParityScorer(_ModelFairnessScorer): # noqa: D412\n \"\"\"\n Measure the statistical parity [1] of a model's output between all subgroup pairs.\n\n Statistical parity (also known as Base Rate or Disparate Impact) states that\n a predictor is unbiased if the prediction is independent of the protected\n attribute.\n\n Statistical Parity is calculated as PP / N, where PP and N are the number of\n Positive Predictions and total Number of predictions made, respectively.\n\n Perfect score\n A perfect score for this metric means that the model does not predict\n positively any of the subgroups at a different rate than it does for the\n rest of the subgroups. For example, if the protected attributes are race\n and sex, then a perfect statistical parity would mean that all combinations\n of values for race and sex have identical ratios of positive predictions.\n Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n\n References\n ----------\n [1] `Cynthia Dwork et al. \"Fairness Through Awareness\". Innovations in\n Theoretical Computer Science. 2012. <https://arxiv.org/abs/1104.3913>`_\n\n Examples\n --------\n\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import ModelStatisticalParityScorer\n\n scorer = ModelStatisticalParityScorer(['race', 'sex'])\n scorer(model, X, y_true)\n\n This metric does not require `y_true`. It can also be called using\n\n .. 
code-block:: python\n\n scorer(model, X)\n \"\"\" # noqa: D412\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=model_statistical_parity,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )\n\n def __call__(\n self,\n model: Any,\n X: pd.DataFrame,\n y_true: Optional[Union[pd.Series, np.ndarray, List]] = None,\n supplementary_features: Optional[pd.DataFrame] = None,\n ):\n \"\"\"\n Compute the metric using a model's predictions on a given array\n of instances ``X``.\n\n Parameters\n ----------\n model: Any\n Object that implements a `predict(X)` function to collect\n categorical predictions.\n X : pandas.DataFrame\n Array of instances to compute the metric on.\n y_true : pandas.Series, numpy.ndarray, list, or None, default=None\n Array of groundtruth labels.\n supplementary_features : pandas.DataFrame, or None, default=None\n Array of supplementary features for each instance. Used in case\n one attribute in ``self.protected_attributes`` is not contained by\n ``X`` (e.g. if the protected attribute is not used by the model).\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to ``self.reduction``.\n\n\n Raises\n ------\n GuardianAIValueError\n - if a feature is present in both ``X``\n and ``supplementary_features``.\n\n \"\"\"\n y_pred = model.predict(X)\n\n subgroups = self._get_check_subgroups(X, supplementary_features)\n\n return self.metric(\n y_true, y_pred, subgroups, self.distance_measure, self.reduction\n )" }, { "identifier": "TheilIndexScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class TheilIndexScorer(_ModelFairnessScorer):\n \"\"\"\n Measures the disparity of a model's predictions according to groundtruth\n labels, as proposed by Speicher et al. [1].\n\n Intuitively, the Theil Index can be thought of as a measure of the\n divergence between a subgroup's different error distributions (i.e. false\n positives and false negatives) against the rest of the subgroups.\n\n Perfect score\n The perfect score for this metric is 0, meaning that the model does not\n have a different error distribution for any subgroup when compared to the\n rest of the subgroups. For example, if the protected attributes are\n race and sex, then a perfect Theil Index disparity would mean that all\n combinations of values for race and sex have identical error\n distributions.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str or None, default=None\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. 
Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n References\n ----------\n [1] `Speicher, Till, et al. \"A unified approach to quantifying algorithmic\n unfairness: Measuring individual & group unfairness via inequality indices.\"\n Proceedings of the 24th ACM SIGKDD international conference on knowledge\n discovery & data mining. 2018. <https://arxiv.org/abs/1807.00787>`_\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import TheilIndexScorer\n scorer = TheilIndexScorer(['race', 'sex'])\n scorer(model, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: Optional[str] = None,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=theil_index,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=True,\n )" }, { "identifier": "TruePositiveRateScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class TruePositiveRateScorer(_ModelFairnessScorer):\n \"\"\"\n Measures the disparity of a model's true positive rate between\n all subgroup pairs (also known as equal opportunity).\n\n For each subgroup, the disparity is measured by comparing the true positive\n rate on instances of a subgroup against the rest of the subgroups.\n\n True Positive Rate [1] (also known as TPR, recall, or sensitivity) is\n calculated as TP / (TP + FN), where TP and FN are the number of true\n positives and false negatives, respectively.\n\n\n Perfect score\n A perfect score for this metric means that the model does not correctly\n predict the positive class for any of the subgroups more often than it\n does for the rest of the subgroups. For example, if the protected\n attributes are race and sex, then a perfect true positive rate disparity\n would mean that all combinations of values for race and sex have\n identical true positive rates. Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n References\n ----------\n [1] `Moritz Hardt et al. 
\"Equality of Opportunity in Supervised Learning\".\n Advances in Neural Information Processing Systems. 2016.\n <https://arxiv.org/pdf/1610.02413.pdf>`_\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import TruePositiveRateScorer\n scorer = TruePositiveRateScorer(['race', 'sex'])\n scorer(model, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=true_positive_rate,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )" }, { "identifier": "equalized_odds", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def equalized_odds(\n y_true: Union[pd.Series, np.ndarray, List],\n y_pred: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the disparity of a model's true positive and false positive rates\n between subgroups and the rest of the subgroups.\n\n For more details, refer to :class:`.EqualizedOddsScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list\n Array of model predictions.\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import equalized_odds\n subgroups = X[['race', 'sex']]\n equalized_odds(y_true, y_pred, subgroups)\n \"\"\"\n tpr = true_positive_rate(\n y_true,\n y_pred,\n subgroups,\n distance_measure=distance_measure,\n reduction=reduction,\n )\n\n fpr = false_positive_rate(\n y_true,\n y_pred,\n subgroups,\n distance_measure=distance_measure,\n reduction=reduction,\n )\n if isinstance(tpr, dict):\n eq_odds = {}\n for key in tpr:\n eq_odds[key] = np.nanmax([tpr[key], fpr[key]])\n else:\n eq_odds = np.nanmax([tpr, fpr])\n\n return eq_odds" }, { "identifier": "error_rate", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def error_rate(\n y_true: Union[pd.Series, np.ndarray, List],\n y_pred: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the disparity of a model's error rate between all subgroup pairs.\n\n For more details, refer to :class:`.ErrorRateScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list\n Array of model predictions.\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import error_rate\n subgroups = X[['race', 'sex']]\n error_rate(y_true, y_pred, subgroups)\n \"\"\"\n return _model_metric(\n y_true,\n y_pred,\n subgroups,\n metric=\"error_rate\",\n distance_measure=distance_measure,\n reduction=reduction,\n allow_y_true_none=False,\n allow_distance_measure_none=False,\n )" }, { "identifier": "false_discovery_rate", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def false_discovery_rate(\n y_true: Union[pd.Series, np.ndarray, List],\n y_pred: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the disparity of a model's false discovery rate between all subgroup pairs.\n\n For more details, refer to :class:`.FalseDiscoveryRateScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list\n Array of model predictions.\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. 
Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import false_discovery_rate\n subgroups = X[['race', 'sex']]\n false_discovery_rate(y_true, y_pred, subgroups)\n \"\"\"\n return _model_metric(\n y_true,\n y_pred,\n subgroups,\n metric=\"false_discovery_rate\",\n distance_measure=distance_measure,\n reduction=reduction,\n allow_y_true_none=False,\n allow_distance_measure_none=False,\n )" }, { "identifier": "false_negative_rate", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def false_negative_rate(\n y_true: Union[pd.Series, np.ndarray, List],\n y_pred: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the disparity of a model's false negative rate between all subgroup pairs.\n\n For more details, refer to :class:`.FalseNegativeRateScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list\n Array of model predictions.\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import false_negative_rate\n subgroups = X[['race', 'sex']]\n false_negative_rate(y_true, y_pred, subgroups)\n \"\"\"\n return _model_metric(\n y_true,\n y_pred,\n subgroups,\n metric=\"false_negative_rate\",\n distance_measure=distance_measure,\n reduction=reduction,\n allow_y_true_none=False,\n allow_distance_measure_none=False,\n )" }, { "identifier": "false_omission_rate", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def false_omission_rate(\n y_true: Union[pd.Series, np.ndarray, List],\n y_pred: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the disparity of a model's false omission rate between all subgroup pairs.\n\n For more details, refer to :class:`.FalseOmissionRateScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list\n Array of model predictions.\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import false_omission_rate\n subgroups = X[['race', 'sex']]\n false_omission_rate(y_true, y_pred, subgroups)\n \"\"\"\n return _model_metric(\n y_true,\n y_pred,\n subgroups,\n metric=\"false_omission_rate\",\n distance_measure=distance_measure,\n reduction=reduction,\n allow_y_true_none=False,\n allow_distance_measure_none=False,\n )" }, { "identifier": "false_positive_rate", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def false_positive_rate(\n y_true: Union[pd.Series, np.ndarray, List],\n y_pred: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the disparity of a model's false positive rate between all subgroup pairs.\n\n For more details, refer to :class:`.FalsePositiveRateScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list\n Array of model predictions.\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. 
Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import false_positive_rate\n subgroups = X[['race', 'sex']]\n false_positive_rate(y_true, y_pred, subgroups)\n \"\"\"\n return _model_metric(\n y_true,\n y_pred,\n subgroups,\n metric=\"false_positive_rate\",\n distance_measure=distance_measure,\n reduction=reduction,\n allow_y_true_none=False,\n allow_distance_measure_none=False,\n )" }, { "identifier": "model_statistical_parity", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def model_statistical_parity(\n y_true: Optional[Union[pd.Series, np.ndarray, List]] = None,\n y_pred: Optional[Union[pd.Series, np.ndarray, List]] = None,\n subgroups: Optional[pd.DataFrame] = None,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measure the statistical parity of a model's output between all subgroup pairs.\n\n For more details, refer to :class:`.ModelStatisticalParityScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list or None, default=None\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list or None, default=None\n Array of model predictions.\n subgroups : pandas.DataFrame or None, default=None\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n Raises\n ------\n GuardianAIValueError\n If Value of None is received for either `y_pred` or `subgroups`.\n\n Examples\n --------\n\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import model_statistical_parity\n subgroups = X[['race', 'sex']]\n model_statistical_parity(y_true, y_pred, subgroups)\n\n This metric does not require `y_true`. It can also be called using\n\n .. code-block:: python\n\n model_statistical_parity(None, y_pred, subgroups)\n model_statistical_parity(y_pred=y_pred, subgroups=subgroups)\n \"\"\" # noqa: D412\n\n if y_pred is None or subgroups is None:\n raise GuardianAIValueError(\n \"Value of None was received for either `y_pred` or `subgroups`. \"\n \"This may be due to calling the metric using only 2 positional \"\n \"arguments. 
If this is the case, either call the function by \"\n \"passing ``None`` as the first argument or use named arguments for \"\n \"`y_pred` and `subgroups`.\"\n )\n\n return _model_metric(\n None,\n y_pred,\n subgroups,\n metric=\"selection_rate\",\n distance_measure=distance_measure,\n reduction=reduction,\n allow_y_true_none=True,\n allow_distance_measure_none=False,\n )" }, { "identifier": "theil_index", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def theil_index(\n y_true: Union[pd.Series, np.ndarray, List],\n y_pred: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: Optional[str] = None,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the disparity of a model's predictions according to groundtruth\n labels, as proposed by Speicher et al. [1].\n\n For more details, refer to :class:`.TheilIndexScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list\n Array of model predictions.\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str or None, default=None\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n Raises\n ------\n GuardianAIValueError\n If distance_measure values are given to Theil Index.\n\n References\n ----------\n [1]: `Speicher, Till, et al. \"A unified approach to quantifying algorithmic\n unfairness: Measuring individual & group unfairness via inequality indices.\"\n Proceedings of the 24th ACM SIGKDD international conference on knowledge\n discovery & data mining. 2018. <https://arxiv.org/abs/1807.00787>`_\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import theil_index\n subgroups = X[['race', 'sex']]\n theil_index(y_true, y_pred, subgroups)\n \"\"\"\n\n if distance_measure is not None and not isinstance(\n distance_measure, _DistanceMetric\n ):\n raise GuardianAIValueError(\n \"Theil Index does not accept distance_measure values. 
It should\"\n \"always be set to ``None``.\"\n )\n\n return _model_metric(\n y_true,\n y_pred,\n subgroups,\n metric=\"between_group_theil_index\",\n distance_measure=None,\n reduction=reduction,\n allow_y_true_none=False,\n allow_distance_measure_none=True,\n )" }, { "identifier": "true_positive_rate", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def true_positive_rate(\n y_true: Union[pd.Series, np.ndarray, List],\n y_pred: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the disparity of a model's true positive rate between all subgroup pairs.\n\n For more details, refer to :class:`.TruePositiveRateScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list\n Array of model predictions.\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import true_positive_rate\n subgroups = X[['race', 'sex']]\n true_positive_rate(y_true, y_pred, subgroups)\n \"\"\"\n return _model_metric(\n y_true,\n y_pred,\n subgroups,\n metric=\"true_positive_rate\",\n distance_measure=distance_measure,\n reduction=reduction,\n allow_y_true_none=False,\n allow_distance_measure_none=False,\n )" }, { "identifier": "GuardianAIValueError", "path": "guardian_ai/utils/exception.py", "snippet": "class GuardianAIValueError(ValueError, GuardianAIError):\n \"\"\"Exception raised for unexpected values.\"\"\"\n\n pass" } ]
from guardian_ai.fairness.metrics.model import (
    EqualizedOddsScorer,
    ErrorRateScorer,
    FalseDiscoveryRateScorer,
    FalseNegativeRateScorer,
    FalseOmissionRateScorer,
    FalsePositiveRateScorer,
    ModelStatisticalParityScorer,
    TheilIndexScorer,
    TruePositiveRateScorer,
    equalized_odds,
    error_rate,
    false_discovery_rate,
    false_negative_rate,
    false_omission_rate,
    false_positive_rate,
    model_statistical_parity,
    theil_index,
    true_positive_rate,
)
from guardian_ai.utils.exception import GuardianAIValueError
12,843
#!/usr/bin/env python
# -*- coding: utf-8 -*--

# Copyright (c) 2023 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
"""Core for fairness metrics"""

fairness_scorers_dict = {  # noqa N816
    "statistical_parity": ModelStatisticalParityScorer,
    "TPR": TruePositiveRateScorer,
    "FPR": FalsePositiveRateScorer,
    "FNR": FalseNegativeRateScorer,
    "FOR": FalseOmissionRateScorer,
    "FDR": FalseDiscoveryRateScorer,
#!/usr/bin/env python
# -*- coding: utf-8 -*--

# Copyright (c) 2023 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
"""Core for fairness metrics"""

fairness_scorers_dict = {  # noqa N816
    "statistical_parity": ModelStatisticalParityScorer,
    "TPR": TruePositiveRateScorer,
    "FPR": FalsePositiveRateScorer,
    "FNR": FalseNegativeRateScorer,
    "FOR": FalseOmissionRateScorer,
    "FDR": FalseDiscoveryRateScorer,
"error_rate": ErrorRateScorer,
1
2023-10-09 09:48:50+00:00
16k
IST-DASLab/SparseFinetuning
llmfoundry/models/mpt/modeling_mpt.py
[ { "identifier": "attn_bias_shape", "path": "llmfoundry/models/layers/attention.py", "snippet": "def attn_bias_shape(attn_impl: str, n_heads: int, seq_len: int, alibi: bool,\n prefix_lm: bool, causal: bool, use_sequence_id: bool):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n if (prefix_lm or not causal) or use_sequence_id:\n return (1, n_heads, seq_len, seq_len)\n return (1, n_heads, 1, seq_len)\n elif prefix_lm or use_sequence_id:\n return (1, 1, seq_len, seq_len)\n return None\n else:\n raise ValueError(f'{attn_impl=} is an invalid setting.')" }, { "identifier": "build_attn_bias", "path": "llmfoundry/models/layers/attention.py", "snippet": "def build_attn_bias(\n attn_impl: str,\n attn_bias: torch.Tensor,\n n_heads: int,\n seq_len: int,\n causal: bool = False,\n alibi: bool = False,\n alibi_bias_max: int = 8,\n):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n # in place add alibi to attn bias\n device, dtype = attn_bias.device, attn_bias.dtype\n attn_bias = attn_bias.add(\n build_alibi_bias(\n n_heads,\n seq_len,\n full=not causal,\n alibi_bias_max=alibi_bias_max,\n device=device,\n dtype=dtype,\n ))\n return attn_bias\n else:\n raise ValueError(f'{attn_impl=} is an invalid setting.')" }, { "identifier": "MPTBlock", "path": "llmfoundry/models/layers/blocks.py", "snippet": "class MPTBlock(nn.Module):\n\n def __init__(\n self,\n d_model: int,\n n_heads: int,\n expansion_ratio: int,\n attn_config: Optional[Dict] = None,\n ffn_config: Optional[Dict] = None,\n resid_pdrop: float = 0.0,\n norm_type: str = 'low_precision_layernorm',\n verbose: int = 0,\n fc_type: str = 'torch',\n device: Optional[str] = None,\n **kwargs: Any,\n ):\n if attn_config is None:\n attn_config = {\n 'attn_type': 'multihead_attention',\n 'attn_pdrop': 0.0,\n 'attn_impl': 'triton',\n 'qk_ln': False,\n 'clip_qkv': None,\n 'softmax_scale': None,\n 'prefix_lm': False,\n 'attn_uses_sequence_id': False,\n 'alibi': False,\n 'alibi_bias_max': 8,\n }\n\n if ffn_config is None:\n ffn_config = {\n 'ffn_type': 'mptmlp',\n }\n\n del kwargs # unused, just to capture any extra args from the config\n super().__init__()\n\n norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]\n assert isinstance(attn_config['attn_type'], str)\n attn_class = ATTN_CLASS_REGISTRY[attn_config['attn_type']]\n\n # necessary to avoid passing extraneous args into attn_class while allowing the use of **kwargs\n args_to_exclude_in_attn_class = {\n 'attn_type', 'prefix_lm', 'alibi', 'attn_uses_sequence_id',\n 'alibi_bias_max'\n }\n attn_config_subset_for_attn_class = {\n k: v\n for k, v in attn_config.items()\n if k not in args_to_exclude_in_attn_class\n }\n\n self.norm_1 = norm_class(d_model, device=device)\n self.attn = attn_class(d_model=d_model,\n n_heads=n_heads,\n fc_type=fc_type,\n verbose=verbose,\n device=device,\n **attn_config_subset_for_attn_class)\n self.norm_2 = None\n if not getattr(FFN_CLASS_REGISTRY[ffn_config['ffn_type']], '_has_norm',\n False):\n self.norm_2 = norm_class(d_model, device=device)\n self.ffn = build_ffn(\n d_model=d_model,\n expansion_ratio=expansion_ratio,\n device=device,\n **ffn_config,\n )\n self.resid_attn_dropout = nn.Dropout(resid_pdrop)\n self.resid_ffn_dropout = nn.Dropout(resid_pdrop)\n\n def forward(\n self,\n x: torch.Tensor,\n past_key_value: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,\n attn_bias: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.ByteTensor] = None,\n is_causal: bool = True,\n ) 
-> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[\n torch.Tensor, torch.Tensor]]]:\n a = self.norm_1(x)\n b, attn_weights, past_key_value = self.attn(\n a,\n past_key_value=past_key_value,\n attn_bias=attn_bias,\n attention_mask=attention_mask,\n is_causal=is_causal,\n )\n x = x + self.resid_attn_dropout(b)\n m = x\n if self.norm_2 is not None:\n m = self.norm_2(x)\n n = self.ffn(m)\n x = x + self.resid_ffn_dropout(n)\n return x, attn_weights, past_key_value" }, { "identifier": "SharedEmbedding", "path": "llmfoundry/models/layers/custom_embedding.py", "snippet": "class SharedEmbedding(nn.Embedding):\n\n def forward(self, input: Tensor, unembed: bool = False) -> Tensor:\n if unembed:\n return F.linear(input, self.weight)\n return super().forward(input)" }, { "identifier": "FC_CLASS_REGISTRY", "path": "llmfoundry/models/layers/fc.py", "snippet": "FC_CLASS_REGISTRY = {\n 'torch': nn.Linear,\n}" }, { "identifier": "FFN_CLASS_REGISTRY", "path": "llmfoundry/models/layers/ffn.py", "snippet": "FFN_CLASS_REGISTRY = {\n 'mptmlp': MPTMLP,\n}" }, { "identifier": "MPTMLP", "path": "llmfoundry/models/layers/ffn.py", "snippet": "class MPTMLP(nn.Module):\n\n def __init__(\n self,\n d_model: int,\n expansion_ratio: int,\n fc_type: str = 'torch',\n device: Optional[str] = None,\n ):\n super().__init__()\n fc_kwargs = {}\n if fc_type != 'te':\n fc_kwargs['device'] = device\n self.up_proj = FC_CLASS_REGISTRY[fc_type](\n d_model,\n expansion_ratio * d_model,\n **fc_kwargs,\n )\n self.act = nn.GELU(approximate='none')\n self.down_proj = FC_CLASS_REGISTRY[fc_type](\n expansion_ratio * d_model,\n d_model,\n **fc_kwargs,\n )\n self.down_proj._is_residual = True # type: ignore\n\n def forward(self, x: torch.Tensor):\n return self.down_proj(self.act(self.up_proj(x)))" }, { "identifier": "build_ffn", "path": "llmfoundry/models/layers/ffn.py", "snippet": "def build_ffn(\n d_model: int,\n expansion_ratio: int,\n fc_type: str = 'torch',\n device: Optional[str] = None,\n **kwargs: Any,\n):\n ffn_type = kwargs.pop('ffn_type')\n if ffn_type == 'mptmlp':\n if len(kwargs) > 0:\n raise ValueError(\n f'MPTMLP got an unexpected keyword argument: {kwargs}')\n return MPTMLP(\n d_model=d_model,\n expansion_ratio=expansion_ratio,\n fc_type=fc_type,\n device=device,\n )\n elif ffn_type == 'te_ln_mlp':\n assert te is not None\n return te.LayerNormMLP(\n hidden_size=d_model,\n ffn_hidden_size=d_model * expansion_ratio,\n **kwargs,\n )\n\n raise ValueError(f'{ffn_type=} not recognized.')" }, { "identifier": "NORM_CLASS_REGISTRY", "path": "llmfoundry/models/layers/norm.py", "snippet": "NORM_CLASS_REGISTRY: Dict[str, Type[torch.nn.Module]] = {\n 'layernorm': torch.nn.LayerNorm,\n 'low_precision_layernorm': LPLayerNorm,\n 'rmsnorm': RMSNorm,\n 'low_precision_rmsnorm': LPRMSNorm,\n}" }, { "identifier": "MPTConfig", "path": "llmfoundry/models/mpt/configuration_mpt.py", "snippet": "class MPTConfig(PretrainedConfig):\n model_type = 'mpt'\n\n def __init__(\n self,\n d_model: int = 2048,\n n_heads: int = 16,\n n_layers: int = 24,\n expansion_ratio: int = 4,\n max_seq_len: int = 2048,\n vocab_size: int = 50368,\n resid_pdrop: float = 0.0,\n emb_pdrop: float = 0.0,\n learned_pos_emb: bool = True,\n attn_config: Dict = attn_config_defaults,\n ffn_config: Dict = ffn_config_defaults,\n init_device: str = 'cpu',\n logit_scale: Optional[Union[float, str]] = None,\n no_bias: bool = False,\n verbose: int = 0,\n embedding_fraction: float = 1.0,\n norm_type: str = 'low_precision_layernorm',\n use_cache: bool = False,\n init_config: Dict = 
init_config_defaults,\n fc_type: str = 'torch',\n **kwargs: Any,\n ):\n \"\"\"The MPT configuration class.\n\n Args:\n d_model (int): The size of the embedding dimension of the model.\n n_heads (int): The number of attention heads.\n n_layers (int): The number of layers in the model.\n expansion_ratio (int): The ratio of the up/down scale in the ffn.\n max_seq_len (int): The maximum sequence length of the model.\n vocab_size (int): The size of the vocabulary.\n resid_pdrop (float): The dropout probability applied to the attention output before combining with residual.\n emb_pdrop (float): The dropout probability for the embedding layer.\n learned_pos_emb (bool): Whether to use learned positional embeddings\n attn_config (Dict): A dictionary used to configure the model's attention module:\n attn_type (str): type of attention to use. Options: multihead_attention, multiquery_attention, grouped_query_attention\n attn_pdrop (float): The dropout probability for the attention layers.\n attn_impl (str): The attention implementation to use. One of 'torch', 'flash', or 'triton'.\n qk_ln (bool): Whether to apply layer normalization to the queries and keys in the attention layer.\n clip_qkv (Optional[float]): If not None, clip the queries, keys, and values in the attention layer to\n this value.\n softmax_scale (Optional[float]): If not None, scale the softmax in the attention layer by this value. If None,\n use the default scale of ``1/sqrt(d_keys)``.\n prefix_lm (Optional[bool]): Whether the model should operate as a Prefix LM. This requires passing an\n extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix\n can attend to one another bi-directionally. Tokens outside the prefix use causal attention.\n attn_uses_sequence_id (Optional[bool]): Whether to restrict attention to tokens that have the same sequence_id.\n When the model is in `train` mode, this requires passing an extra `sequence_id` argument which indicates\n which sub-sequence each token belongs to.\n Defaults to ``False`` meaning any provided `sequence_id` will be ignored.\n alibi (bool): Whether to use the alibi bias instead of position embeddings.\n alibi_bias_max (int): The maximum value of the alibi bias.\n kv_n_heads (Optional[int]): For grouped_query_attention only, allow user to specify number of kv heads.\n ffn_config (Dict): A dictionary used to configure the model's ffn module:\n ffn_type (str): type of ffn to use. Options: mptmlp, te_ln_mlp\n init_device (str): The device to use for parameter initialization.\n logit_scale (Optional[Union[float, str]]): If not None, scale the logits by this value.\n no_bias (bool): Whether to use bias in all layers.\n verbose (int): The verbosity level. 0 is silent.\n embedding_fraction (float): The fraction to scale the gradients of the embedding layer by.\n norm_type (str): choose type of norm to use\n use_cache (bool): Whether or not the model should return the last key/values attentions\n init_config (Dict): A dictionary used to configure the model initialization:\n init_config.name: The parameter initialization scheme to use. Options: 'default_', 'baseline_',\n 'kaiming_uniform_', 'kaiming_normal_', 'neox_init_', 'small_init_', 'xavier_uniform_', or\n 'xavier_normal_'. 
These mimic the parameter initialization methods in PyTorch.\n init_div_is_residual (Union[int, float, str, bool]): Value to divide initial weights by if ``module._is_residual`` is True.\n emb_init_std (Optional[float]): The standard deviation of the normal distribution used to initialize the embedding layer.\n emb_init_uniform_lim (Optional[Union[Tuple[float, float], float]]): The lower and upper limits of the uniform distribution\n used to initialize the embedding layer. Mutually exclusive with ``emb_init_std``.\n init_std (float): The standard deviation of the normal distribution used to initialize the model,\n if using the baseline_ parameter initialization scheme.\n init_gain (float): The gain to use for parameter initialization with kaiming or xavier initialization schemes.\n fan_mode (str): The fan mode to use for parameter initialization with kaiming initialization schemes.\n init_nonlinearity (str): The nonlinearity to use for parameter initialization with kaiming initialization schemes.\n ---\n See llmfoundry.models.utils.param_init_fns.py for info on other param init config options\n fc_type (str): choose fc layer implementation. Options: torch and te. te layers support fp8 when using H100 GPUs.\n \"\"\"\n self.d_model = d_model\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.expansion_ratio = expansion_ratio\n self.max_seq_len = max_seq_len\n self.vocab_size = vocab_size\n self.resid_pdrop = resid_pdrop\n self.emb_pdrop = emb_pdrop\n self.learned_pos_emb = learned_pos_emb\n self.attn_config = attn_config\n self.ffn_config = ffn_config\n self.init_device = init_device\n self.logit_scale = logit_scale\n self.no_bias = no_bias\n self.verbose = verbose\n self.embedding_fraction = embedding_fraction\n self.norm_type = norm_type\n self.use_cache = use_cache\n self.init_config = init_config\n self.fc_type = fc_type\n if 'name' in kwargs:\n del kwargs['name']\n if 'loss_fn' in kwargs:\n del kwargs['loss_fn']\n if self.attn_config.get('alibi', False):\n self.learned_pos_emb = False\n warnings.warn(\n f'alibi is turned on, setting `learned_pos_emb` to `False.`')\n super().__init__(**kwargs)\n\n self._validate_config()\n\n def _set_config_defaults(self, config: Dict[str, Any],\n config_defaults: Dict[str, Any]):\n # set config defaults\n for k, v in config_defaults.items():\n if k not in config:\n config[k] = v\n return config\n\n def _validate_config(self):\n # set config defaults\n self.attn_config = self._set_config_defaults(\n self.attn_config,\n attn_config_defaults,\n )\n self.ffn_config = self._set_config_defaults(\n self.ffn_config,\n ffn_config_defaults,\n )\n self.init_config = self._set_config_defaults(\n self.init_config,\n init_config_defaults,\n )\n\n if self.d_model % self.n_heads != 0:\n raise ValueError('d_model must be divisible by n_heads')\n if any(\n prob < 0 or prob > 1 for prob in\n [self.attn_config['attn_pdrop'], self.resid_pdrop, self.emb_pdrop]):\n raise ValueError(\n \"self.attn_config['attn_pdrop'], resid_pdrop, emb_pdrop are probabilities and must be between 0 and 1\"\n )\n if self.attn_config['attn_impl'] not in ['torch', 'flash', 'triton']:\n raise ValueError(\n f\"Unknown attn_impl={self.attn_config['attn_impl']}\")\n if self.attn_config['prefix_lm'] and self.attn_config[\n 'attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError(\n 'prefix_lm only implemented with torch and triton attention.')\n if self.attn_config['alibi'] and self.attn_config['attn_impl'] not in [\n 'torch', 'triton'\n ]:\n raise NotImplementedError(\n 'alibi only 
implemented with torch and triton attention.')\n if self.attn_config['attn_uses_sequence_id'] and self.attn_config[\n 'attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError(\n 'attn_uses_sequence_id only implemented with torch and triton attention.'\n )\n if self.embedding_fraction > 1 or self.embedding_fraction <= 0:\n raise ValueError(\n 'model.embedding_fraction must be between 0 (exclusive) and 1 (inclusive)!'\n )\n if isinstance(self.logit_scale,\n str) and self.logit_scale != 'inv_sqrt_d_model':\n raise ValueError(\n f\"{self.logit_scale=} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.\"\n )\n if self.init_config.get('name', None) is None:\n raise ValueError(f\"{self.init_config=} 'name' needs to be set.\")\n if not self.learned_pos_emb and not self.attn_config['alibi']:\n warnings.warn(\n f'Positional information not being provided to the model using either learned_pos_emb or alibi.'\n )\n if self.fc_type == 'te' or self.ffn_config['ffn_type'] == 'te_ln_mlp':\n try:\n import transformer_engine.pytorch as te\n del te # unused\n except:\n raise ImportError(\n 'TransformerEngine import fail. `fc_type: te` requires TransformerEngine be installed. '\n +\n 'The required version of transformer_engine also requires FlashAttention v1.0.6 is installed:\\n'\n + 'pip install flash-attn==1.0.6 --no-build-isolation \\n' +\n 'pip install git+https://github.com/NVIDIA/TransformerEngine.git@144e4888b2cdd60bd52e706d5b7a79cb9c1a7156'\n )\n if self.ffn_config['ffn_type'] == 'mptmlp':\n self.ffn_config['fc_type'] = self.fc_type\n elif self.ffn_config['ffn_type'] == 'te_ln_mlp':\n self.ffn_config['bias'] = not self.no_bias" }, { "identifier": "AutoTokenizerForMOD", "path": "llmfoundry/models/utils/adapt_tokenizer.py", "snippet": "class AutoTokenizerForMOD(AutoTokenizer):\n \"\"\"AutoTokenizer + Adaptation for MOD.\n\n A simple wrapper around AutoTokenizer to make instantiating\n an MOD-adapted tokenizer a bit easier.\n\n MOD-adapted tokenizers have sentinel tokens (e.g., <extra_id_0>),\n a padding token, and a property to get the token ids of the\n sentinel tokens.\n \"\"\"\n\n @classmethod\n def from_pretrained(cls, *args: Any, **kwargs: Any):\n \"\"\"See `AutoTokenizer.from_pretrained` docstring.\"\"\"\n tokenizer = super().from_pretrained(*args, **kwargs)\n adapt_tokenizer_for_denoising(tokenizer)\n return tokenizer" }, { "identifier": "adapt_tokenizer_for_denoising", "path": "llmfoundry/models/utils/adapt_tokenizer.py", "snippet": "def adapt_tokenizer_for_denoising(tokenizer: PreTrainedTokenizerBase):\n \"\"\"Adds sentinel tokens and padding token (if missing).\n\n Expands the tokenizer vocabulary to include sentinel tokens\n used in mixture-of-denoiser tasks as well as a padding token.\n\n All added tokens are added as special tokens. No tokens are\n added if sentinel tokens and padding token already exist.\n \"\"\"\n # Add sentinel tokens (e.g., <extra_id_0>, <extra_id_1>, and so on). 
Has no effect if these are already in the vocab.\n sentinels_to_add = [f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)]\n tokenizer.add_tokens(sentinels_to_add, special_tokens=True)\n\n # If the padding token has not been set, add <pad> and use it\n if tokenizer.pad_token is None:\n tokenizer.add_tokens('<pad>', special_tokens=True)\n tokenizer.pad_token = '<pad>'\n assert tokenizer.pad_token_id is not None\n\n # Register a property that gets us the ids of the sentinel tokens\n sentinels = ''.join([f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)])\n _sentinel_token_ids = tokenizer(sentinels,\n add_special_tokens=False).input_ids\n\n tokenizer.sentinel_token_ids = _sentinel_token_ids" }, { "identifier": "add_bidirectional_mask_if_missing", "path": "llmfoundry/models/utils/hf_prefixlm_converter.py", "snippet": "def add_bidirectional_mask_if_missing(batch: MutableMapping):\n \"\"\"Attempts to add bidirectional_mask to batch if missing.\n\n Raises:\n KeyError if bidirectional_mask is missing and can't be inferred\n \"\"\"\n if 'bidirectional_mask' not in batch:\n if batch.get('mode', None) == 'icl_task':\n batch['bidirectional_mask'] = batch['attention_mask'].clone()\n for i, continuation_indices in enumerate(\n batch['continuation_indices']):\n batch['bidirectional_mask'][i, continuation_indices] = 0\n elif ('labels' in batch) and ('attention_mask' in batch):\n batch['bidirectional_mask'] = torch.logical_and(\n torch.eq(batch['attention_mask'], 1),\n torch.eq(batch['labels'], -100),\n ).type_as(batch['attention_mask'])\n else:\n raise KeyError(\n 'No bidirectional_mask in batch and not sure how to construct one.'\n )" }, { "identifier": "convert_hf_causal_lm_to_prefix_lm", "path": "llmfoundry/models/utils/hf_prefixlm_converter.py", "snippet": "def convert_hf_causal_lm_to_prefix_lm(\n model: CAUSAL_LM_TYPES) -> CAUSAL_LM_TYPES:\n \"\"\"Converts a HuggingFace Causal LM to a Prefix LM.\n\n Supported HuggingFace model classes:\n - `GPT2LMHeadModel`\n - `GPTNeoForCausalLM`\n - `GPTNeoXForCausalLM`\n - `GPTJForCausalLM`\n - `BloomForCausalLM`\n - `OPTForCausalLM`\n\n Conversion to a Prefix LM is done by modifying the `forward` method, and possibly also the\n `generate` method and/or select underlying methods depending on the model class.\n\n These changes preserve the model API, but add a new input to `forward`: \"bidirectional_mask\".\n\n Notes on training:\n To actually train the converted model as a Prefix LM, training batches will need to indicate\n the prefix/target structure by including `bidirectional_mask` as part of the batch inputs.\n\n **This is not a standard input and requires custom layers either within or after your dataloader.**\n\n In addition to adding `bidirectional_mask` to the batch, this custom code should modify `labels`\n such that `batch['labels'][batch['bidirectional_mask'] == 1] == -100`.\n That is, the prefix portion of the sequence should not generate any loss. Loss should only be\n generated by the target portion of the sequence.\n\n Notes on `GPTNeoForCausalLM`:\n To simplify the implementation, \"global\" and \"local\" attention layers are handled differently.\n For \"global\" layers, we handle conversion as described above. 
For \"local\" layers, which use a\n causal attention mask within a restricted local window, we do not alter the masking.\n\n Notes on `forward` method conversion:\n After conversion, the `forward` method will handle a new input, `bidirectional_mask`,\n which should be a [batch_size, seq_length] byte tensor, where 1 indicates token positions\n belonging to the prefix (prefix tokens can attend to one another bidirectionally), and\n 0 indicates token positions belonging to the target.\n\n The new `forward` method will incorporate `bidirectional_mask` (if supplied) into the existing\n causal mask, call the original `forward` method, and (if the causal mask is a buffer) reset\n the causal masks before returning the result.\n\n Notes on `generate` method conversion:\n After conversion, the `generate` method will have the same signature but will internally\n convert all causal masks to be purely bidirectional, call the original `generate` method, and\n (where appropriate) reset the causal masks before returning the result.\n\n This works thanks to the logic of the HuggingFace `generate` API, which first encodes the token\n \"prompt\" passed to `generate` (which is treated as the prefix) and then sequentially generates\n each new token. Encodings are cached as generation happens, so all prefix tokens can attend to one\n another (as expected in a Prefix LM) and generated tokens can only attend to prefix tokens and\n previously-generated tokens (also as expected in a Prefix LM).\n\n To preserve the API, the original methods are renamed to `_original_forward` and\n `_original_generate`, and replaced with new `forward` and `generate` methods that wrap\n them, respectively. Although implementation details vary by model class.\n \"\"\"\n if isinstance(model, _SUPPORTED_GPT_MODELS):\n return _convert_gpt_causal_lm_to_prefix_lm(model)\n\n elif isinstance(model, BloomForCausalLM):\n return _convert_bloom_causal_lm_to_prefix_lm(model)\n\n elif isinstance(model, OPTForCausalLM):\n return _convert_opt_causal_lm_to_prefix_lm(model)\n\n else:\n raise TypeError(\n f'Cannot convert model to Prefix LM. ' +\\\n f'Model does not belong to set of supported HF models:' +\\\n f'\\n{_SUPPORTED_HF_MODELS}'\n )" }, { "identifier": "init_empty_weights", "path": "llmfoundry/models/utils/meta_init_context.py", "snippet": "@contextmanager\ndef init_empty_weights(include_buffers: bool = False):\n \"\"\"Meta initialization context manager.\n\n A context manager under which models are initialized with all parameters\n on the meta device, therefore creating an empty model. Useful when just\n initializing the model would blow the available RAM.\n\n Args:\n include_buffers (`bool`, *optional*, defaults to `False`): Whether or\n not to also put all buffers on the meta device while initializing.\n\n Example:\n ```python\n import torch.nn as nn\n\n # Initialize a model with 100 billions parameters in no time and without using any RAM.\n with init_empty_weights():\n tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n ```\n\n <Tip warning={true}>\n\n Any model created under this context manager has no weights. As such you can't do something like\n `model.to(some_device)` with it. 
To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].\n\n </Tip>\n \"\"\"\n with init_on_device(torch.device('meta'),\n include_buffers=include_buffers) as f:\n yield f" }, { "identifier": "generic_param_init_fn_", "path": "llmfoundry/models/utils/param_init_fns.py", "snippet": "def generic_param_init_fn_(\n module: nn.Module,\n init_fn_: Callable,\n n_layers: int,\n d_model: Optional[int] = None,\n init_div_is_residual: Union[int, float, str, bool] = True,\n emb_init_std: Optional[float] = None,\n emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,\n verbose: int = 0,\n **kwargs: Any,\n):\n del kwargs # unused, just to capture any extra args from the config\n if verbose > 1:\n warnings.warn(\n f'If model has bias parameters they are initialized to 0.')\n\n # enable user to divide _is_residual weights by\n # a value which defaults to math.sqrt(2 * cfg.n_layers)\n init_div_is_residual = init_div_is_residual\n\n if init_div_is_residual is False:\n # not used, for pyright\n div_is_residual = 1.0\n elif init_div_is_residual is True:\n div_is_residual = math.sqrt(2 * n_layers)\n elif isinstance(init_div_is_residual, float) or isinstance(\n init_div_is_residual, int):\n div_is_residual = init_div_is_residual\n elif isinstance(\n init_div_is_residual, # type: ignore\n str) and init_div_is_residual.isnumeric():\n # do not trust YAML parsing to always convert numbers to numbers\n div_is_residual = float(init_div_is_residual)\n else:\n # not used, for pyright\n div_is_residual = 1.0\n raise ValueError(\n f'Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}'\n )\n\n if init_div_is_residual is not False:\n if verbose > 1:\n warnings.warn(\n f'Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. ' +\\\n f'Set `init_div_is_residual: false` in init config to disable this.'\n )\n\n if isinstance(module, tuple(set(FC_CLASS_REGISTRY.values()))):\n # Linear\n if hasattr(module, '_fused'):\n fused_init_helper_(module, init_fn_)\n else:\n init_fn_(module.weight)\n if module.bias is not None:\n assert isinstance(module.bias, torch.Tensor)\n torch.nn.init.zeros_(module.bias)\n\n if init_div_is_residual is not False and getattr(\n module, '_is_residual', False):\n with torch.no_grad():\n module.weight.div_(div_is_residual) # type: ignore\n\n elif isinstance(module, nn.Embedding):\n # Embedding\n if emb_init_std is not None:\n std = emb_init_std\n if std == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)\n if verbose > 1:\n warnings.warn(\n f'Embedding layer initialized using normal distribution with mean=0 and {std=}.'\n )\n elif emb_init_uniform_lim is not None:\n lim = emb_init_uniform_lim\n if isinstance(lim, Sequence):\n if len(lim) > 2:\n raise ValueError(\n f'Uniform init requires a min and a max limit. User input: {lim}.'\n )\n if lim[0] == lim[1]:\n warnings.warn(f'Embedding layer initialized to {lim[0]}.')\n else:\n if lim == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n lim = [-lim, lim]\n a, b = lim\n emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)\n if verbose > 1:\n warnings.warn(\n f'Embedding layer initialized using uniform distribution in range {lim}.'\n )\n else:\n emb_init_fn_ = init_fn_\n\n emb_init_fn_(module.weight)\n\n elif isinstance(module,\n tuple(set(NORM_CLASS_REGISTRY.values()))): # type: ignore\n # Norm\n if verbose > 1:\n warnings.warn(\n f'Norm weights are set to 1. 
If norm layer has a bias it is initialized to 0.'\n )\n if hasattr(module, 'weight') and module.weight is not None:\n torch.nn.init.ones_(module.weight) # type: ignore\n if hasattr(module, 'bias') and module.bias is not None:\n torch.nn.init.zeros_(module.bias) # type: ignore\n\n elif isinstance(module, nn.MultiheadAttention):\n # torch's MultiheadAttention\n if module._qkv_same_embed_dim:\n assert module.in_proj_weight is not None\n assert module.q_proj_weight is None and module.k_proj_weight is None and module.v_proj_weight is None\n assert d_model is not None\n # in_proj_weight is actually 3 layers and should be split up for width based init\n _d = d_model\n splits = (0, _d, 2 * _d, 3 * _d)\n for s, e in zip(splits[:-1], splits[1:]):\n init_fn_(module.in_proj_weight[s:e])\n else:\n assert module.q_proj_weight is not None and module.k_proj_weight is not None and module.v_proj_weight is not None\n assert module.in_proj_weight is None\n init_fn_(module.q_proj_weight)\n init_fn_(module.k_proj_weight)\n init_fn_(module.v_proj_weight)\n\n # bias\n if module.in_proj_bias is not None:\n torch.nn.init.zeros_(module.in_proj_bias)\n if module.bias_k is not None:\n torch.nn.init.zeros_(module.bias_k)\n if module.bias_v is not None:\n torch.nn.init.zeros_(module.bias_v)\n\n # out proj\n init_fn_(module.out_proj.weight)\n if init_div_is_residual is not False and getattr(\n module.out_proj, '_is_residual', False):\n with torch.no_grad():\n module.out_proj.weight.div_(div_is_residual)\n if module.out_proj.bias is not None:\n torch.nn.init.zeros_(module.out_proj.bias)\n\n elif te is not None and isinstance(module, te.LayerNormMLP):\n if module.layer_norm_weight is not None:\n torch.nn.init.ones_(module.layer_norm_weight) # type: ignore\n if module.layer_norm_bias is not None:\n torch.nn.init.zeros_(module.layer_norm_bias) # type: ignore\n\n init_fn_(module.fc1_weight)\n if module.fc1_bias is not None:\n assert isinstance(module.fc1_bias, torch.Tensor)\n torch.nn.init.zeros_(module.fc1_bias)\n init_fn_(module.fc2_weight)\n if module.fc2_bias is not None:\n assert isinstance(module.fc2_bias, torch.Tensor)\n torch.nn.init.zeros_(module.fc2_bias)\n\n with torch.no_grad():\n module.fc2_weight.div_(div_is_residual) # type: ignore\n\n else:\n for _ in module.parameters(recurse=False):\n # raise error if uninitialized module has any parameters\n raise NotImplementedError(\n f'{module.__class__.__name__} parameters are not initialized by param_init_fn.'\n )" }, { "identifier": "MODEL_INIT_REGISTRY", "path": "llmfoundry/models/utils/param_init_fns.py", "snippet": "MODEL_INIT_REGISTRY = {\n 'default_': torch_default_param_init_fn_,\n 'baseline_': baseline_param_init_fn_,\n 'kaiming_uniform_': kaiming_uniform_param_init_fn_,\n 'kaiming_normal_': kaiming_normal_param_init_fn_,\n 'neox_init_': neox_param_init_fn_,\n 'small_init_': small_param_init_fn_,\n 'xavier_uniform_': xavier_uniform_param_init_fn_,\n 'xavier_normal_': xavier_normal_param_init_fn_,\n}" } ]
import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Any, List, Mapping, MutableMapping, Optional, Tuple, Union
from composer.metrics import (InContextLearningLMAccuracy,
                              InContextLearningLMExpectedCalibrationError,
                              InContextLearningMCExpectedCalibrationError,
                              InContextLearningMultipleChoiceAccuracy,
                              InContextLearningQAAccuracy)
from composer.metrics.nlp import LanguageCrossEntropy, LanguagePerplexity
from composer.models import HuggingFaceModel
from composer.utils import dist
from omegaconf import DictConfig
from omegaconf import OmegaConf as om
from transformers import PreTrainedModel, PreTrainedTokenizerBase
from transformers.modeling_outputs import (BaseModelOutputWithPast,
                                           CausalLMOutputWithPast)
from llmfoundry.models.layers.attention import attn_bias_shape, build_attn_bias
from llmfoundry.models.layers.blocks import MPTBlock
from llmfoundry.models.layers.custom_embedding import SharedEmbedding
from llmfoundry.models.layers.fc import FC_CLASS_REGISTRY as FC_CLASS_REGISTRY
from llmfoundry.models.layers.ffn import \
    FFN_CLASS_REGISTRY as FFN_CLASS_REGISTRY
from llmfoundry.models.layers.ffn import MPTMLP as MPTMLP
from llmfoundry.models.layers.ffn import build_ffn as build_ffn
from llmfoundry.models.layers.norm import NORM_CLASS_REGISTRY
from llmfoundry.models.mpt.configuration_mpt import MPTConfig
from llmfoundry.models.utils.adapt_tokenizer import (
    AutoTokenizerForMOD,  # type: ignore (see note),
    adapt_tokenizer_for_denoising,  # type: ignore (see note)
)
from llmfoundry.models.utils.hf_prefixlm_converter import (
    add_bidirectional_mask_if_missing,  # type: ignore (see note)
    convert_hf_causal_lm_to_prefix_lm,  # type: ignore (see note)
)
from llmfoundry.models.utils.meta_init_context import \
    init_empty_weights  # type: ignore (see note)
from llmfoundry.models.utils.param_init_fns import (
    generic_param_init_fn_,  # type: ignore (see note)
    MODEL_INIT_REGISTRY,
)
from llmfoundry.models.layers.flash_attn_triton import flash_attn_func as flash_attn_func
from flash_attn.losses.cross_entropy import CrossEntropyLoss as FusedCrossEntropyLoss  # type: ignore # isort: skip
11429
**kwargs: Any, ): if inputs_embeds is not None: raise NotImplementedError( 'inputs_embeds is not implemented for MPT yet') attention_mask = kwargs['attention_mask'].bool() if attention_mask[:, -1].sum() != attention_mask.shape[0]: raise NotImplementedError( 'MPT does not support generation with right padding.') if self.transformer.attn_uses_sequence_id and self.training: sequence_id = torch.zeros_like(input_ids[:1]) else: sequence_id = None if past_key_values is not None: input_ids = input_ids[:, -1].unsqueeze(-1) if self.transformer.prefix_lm: # Leverage a convenience of sequential generation! prefix_mask = torch.ones_like(attention_mask) # This requires that we're using the cache if kwargs.get('use_cache') == False: raise NotImplementedError( 'MPT with prefix_lm=True does not support use_cache=False.') else: prefix_mask = None return { 'input_ids': input_ids, 'attention_mask': attention_mask, 'prefix_mask': prefix_mask, 'sequence_id': sequence_id, 'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache', True), } @staticmethod def _reorder_cache(past_key_values: List[Tuple[torch.Tensor, torch.Tensor]], beam_idx: torch.LongTensor): """Used by HuggingFace generate when using beam search with kv-caching. See https://github.com/huggingface/transformers/blob/3ec7a47664ebe40c40f4b722f6bb1cd30c3821ec/src/transformers/models/gpt2/modeling_gpt2.py#L1122-L1133 for an example in transformers. """ reordered_past = [] for layer_past in past_key_values: reordered_past += [ tuple( past_state.index_select(0, beam_idx) for past_state in layer_past) ] return reordered_past class ComposerMPTCausalLM(HuggingFaceModel): def __init__( self, om_model_config: DictConfig, tokenizer: Optional[PreTrainedTokenizerBase] = None, ): resolved_om_model_config = om.to_container(om_model_config, resolve=True) hf_config = MPTConfig.from_dict(resolved_om_model_config) model = MPTForCausalLM(hf_config) train_metrics = [LanguageCrossEntropy(), LanguagePerplexity()] eval_metrics = [ LanguageCrossEntropy(), LanguagePerplexity(), InContextLearningLMAccuracy(), InContextLearningMultipleChoiceAccuracy(), InContextLearningQAAccuracy(), InContextLearningLMExpectedCalibrationError(), InContextLearningMCExpectedCalibrationError(), ] super().__init__( model=model, tokenizer=tokenizer, use_logits=True, metrics=train_metrics, eval_metrics=eval_metrics, shift_labels=True, allow_embedding_resizing=True, ) self.n_active_params = sum(p.numel() for p in self.parameters()) loss_fn_config = om_model_config.get('loss_fn', 'fused_crossentropy') if loss_fn_config == 'fused_crossentropy': try: if hf_config.verbose > 1: warnings.warn('Using Fused Cross Entropy Loss.') self.loss_fn = FusedCrossEntropyLoss(ignore_index=-100) except: raise ValueError( 'Fused Cross Entropy is not installed. Either (1) have a CUDA-compatible GPU ' + 'and `pip install .[gpu]` if installing from source or `pip install xentropy-cuda-lib@git+https://github.com/HazyResearch/[email protected]#subdirectory=csrc/xentropy` ' + 'if installing from pypi, or (2) set your config model.loss_fn=torch_crossentropy.' ) elif loss_fn_config == 'torch_crossentropy': self.loss_fn = nn.CrossEntropyLoss(ignore_index=-100) else: raise ValueError( f'Specified loss_fn={self.loss_fn} not recognized. `loss_fn` must be one of [`fused_crossentropy`, `torch_crossentropy`].' ) def get_targets(self, batch: Mapping): targets = torch.roll(batch['labels'], shifts=-1) targets[:, -1] = -100 return targets def forward(self, batch: MutableMapping): if self.model.transformer.prefix_lm:
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 """A simple, flexible implementation of a GPT model. Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py """ # NOTE: All utils are imported directly even if unused so that # HuggingFace can detect all the needed files to copy into its modules folder. # Otherwise, certain modules are missing. # isort: off try: except: pass # isort: on class MPTPreTrainedModel(PreTrainedModel): config_class = MPTConfig base_model_prefix = 'model' _no_split_modules = ['MPTBlock'] class MPTModel(MPTPreTrainedModel): def __init__(self, config: MPTConfig): config._validate_config() super().__init__(config) self.attn_impl = config.attn_config['attn_impl'] self.prefix_lm = config.attn_config['prefix_lm'] self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id'] self.alibi = config.attn_config['alibi'] self.alibi_bias_max = config.attn_config['alibi_bias_max'] self.learned_pos_emb = config.learned_pos_emb if config.init_device == 'mixed': if dist.get_local_rank() == 0: config.init_device = 'cpu' else: config.init_device = 'meta' if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys(): norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys()) raise NotImplementedError( f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).' ) norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()] # CogView (https://arxiv.org/abs/2105.13290) and GLM-130B (https://arxiv.org/abs/2210.02414) # both report this helping with stabilizing training self.embedding_fraction = config.embedding_fraction self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device) if self.learned_pos_emb: self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device) self.emb_drop = nn.Dropout(config.emb_pdrop) self.blocks = nn.ModuleList([ MPTBlock( device=config.init_device, **config.to_dict(), ) for _ in range(config.n_layers) ]) self.norm_f = norm_class(config.d_model, device=config.init_device) if config.init_device != 'meta': print( f'You are using {config.init_device=}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.' 
) self.apply(self.param_init_fn) self.is_causal = not self.prefix_lm # define attn mask self._attn_bias_initialized = False self.attn_bias = None self.attn_bias_shape = attn_bias_shape( self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id, ) if config.no_bias: for module in self.modules(): if hasattr(module, 'bias') and isinstance( module.bias, nn.Parameter): if config.verbose: warnings.warn( f'Removing bias ({module.bias}) from {module}.') module.register_parameter('bias', None) # Print verbose info if config.verbose and config.verbose > 2: print(self) if 'verbose' not in self.config.init_config: self.config.init_config['verbose'] = self.config.verbose if self.config.init_config['verbose'] > 1: init_fn_name = self.config.init_config['name'] warnings.warn(f'Using {init_fn_name} initialization.') def get_input_embeddings(self): return self.wte def set_input_embeddings(self, value: nn.Embedding): self.wte = value @torch.no_grad() def _attn_bias( self, device: torch.device, dtype: torch.dtype, attention_mask: Optional[torch.ByteTensor] = None, prefix_mask: Optional[torch.ByteTensor] = None, sequence_id: Optional[torch.LongTensor] = None, ): if not self._attn_bias_initialized: if self.attn_bias_shape: self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype) self.attn_bias = build_attn_bias( self.attn_impl, self.attn_bias, self.config.n_heads, self.config.max_seq_len, causal=self.is_causal, alibi=self.alibi, alibi_bias_max=self.alibi_bias_max, ) self._attn_bias_initialized = True # flash does not support prefix_lm and will incorporate any # attention_mask inside the attention module if self.attn_impl == 'flash': return self.attn_bias, attention_mask if self.attn_bias is not None: # .to(*args, **kwargs) is a no-op if tensor is already on # specified device or of specificed dtype self.attn_bias = self.attn_bias.to(dtype=dtype, device=device) attn_bias = self.attn_bias # If using torch or triton, we incorporate the prefix_mask (if appropriate) if self.prefix_lm: assert isinstance(attn_bias, torch.Tensor) # pyright assert isinstance(prefix_mask, torch.Tensor) # pyright attn_bias = self._apply_prefix_mask(attn_bias, prefix_mask) # If using torch or triton, we incorporate sequence_id (if appropriate) if self.attn_uses_sequence_id and sequence_id is not None: assert isinstance(attn_bias, torch.Tensor) # pyright attn_bias = self._apply_sequence_id(attn_bias, sequence_id) # If using torch or triton, we incorporate attention_mask. This will output # None in place of attention_mask since it will not be further needed in the # attention modules. 
if attention_mask is not None: s_k = attention_mask.shape[-1] if attn_bias is None: attn_bias = torch.zeros((1, 1, 1, s_k), device=device, dtype=dtype) else: # clamp to 0 necessary for torch 2.0 compile() _s_k = max(0, attn_bias.size(-1) - s_k) attn_bias = attn_bias[:, :, :, _s_k:] if prefix_mask is not None and (attention_mask.shape != prefix_mask.shape): raise ValueError( f'attention_mask shape={attention_mask.shape} ' + f'and prefix_mask shape={prefix_mask.shape} are not equal.') min_val = torch.finfo(attn_bias.dtype).min attn_bias = attn_bias.masked_fill( ~attention_mask.view(-1, 1, 1, s_k), min_val) return attn_bias, None def _apply_prefix_mask(self, attn_bias: torch.Tensor, prefix_mask: torch.Tensor): s_k, s_q = attn_bias.shape[-2:] if (s_k != self.config.max_seq_len) or (s_q != self.config.max_seq_len): raise ValueError( 'attn_bias does not match the expected shape. ' + f'The last two dimensions should both be {self.config.max_length} ' + f'but are {s_k} and {s_q}.') seq_len = prefix_mask.shape[-1] if seq_len > self.config.max_seq_len: raise ValueError( f'prefix_mask sequence length cannot exceed max_seq_len={self.config.max_seq_len}' ) # select seq_len subset of attn mask attn_bias = attn_bias[..., :seq_len, :seq_len] # Mix the causal max and the bidirectional mask to get the full # allowable attention (i.e. full = not accounting for padding yet) causal = torch.tril( torch.ones((seq_len, seq_len), dtype=torch.bool, device=prefix_mask.device)).view(1, 1, seq_len, seq_len) prefix = prefix_mask.view(-1, 1, 1, seq_len) cannot_attend = ~torch.logical_or(causal, prefix.bool()) min_val = torch.finfo(attn_bias.dtype).min attn_bias = attn_bias.masked_fill(cannot_attend, min_val) return attn_bias def _apply_sequence_id(self, attn_bias: torch.Tensor, sequence_id: torch.LongTensor): seq_len = sequence_id.shape[-1] if seq_len > self.config.max_seq_len: raise ValueError( f'sequence_id sequence length cannot exceed max_seq_len={self.config.max_seq_len}' ) # select seq_len subset of attn mask attn_bias = attn_bias[..., :seq_len, :seq_len] # Restrict attention to tokens that share the same value # in sequence_id cannot_attend = torch.logical_not( torch.eq( sequence_id.view(-1, seq_len, 1), sequence_id.view(-1, 1, seq_len), )).unsqueeze(1) min_val = torch.finfo(attn_bias.dtype).min attn_bias = attn_bias.masked_fill(cannot_attend, min_val) return attn_bias def forward( self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]] = None, attention_mask: Optional[torch.ByteTensor] = None, prefix_mask: Optional[torch.ByteTensor] = None, sequence_id: Optional[torch.LongTensor] = None, return_dict: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, use_cache: Optional[bool] = None, inputs_embeds: Optional[torch.Tensor] = None, ): return_dict = (return_dict if return_dict is not None else self.config.return_dict) use_cache = (use_cache if use_cache is not None else self.config.use_cache) if attention_mask is not None: attention_mask = attention_mask.bool( ) # type: ignore (TODO to figure out the right type here) if prefix_mask is not None: prefix_mask = prefix_mask.bool( ) # type: ignore (TODO to figure out the right type here) # These args are passed in by keyword in huggingface's generate function # https://github.com/huggingface/transformers/blob/68287689f2f0d8b7063c400230b3766987abf18d/src/transformers/generation/utils.py#L2201-L2206 # but have not yet been fully implemented in MPTModel if not return_dict: raise 
NotImplementedError( 'return_dict False is not implemented yet for MPT') if output_attentions: if self.attn_impl != 'torch': raise NotImplementedError( 'output_attentions is not implemented for MPT when using attn_impl `flash` or `triton`.' ) if (attention_mask is not None and attention_mask[:, 0].sum() != attention_mask.shape[0] and self.training): raise NotImplementedError( 'MPT does not support training with left padding.') if self.prefix_lm and prefix_mask is None: raise ValueError( 'prefix_mask is a required argument when MPT is configured with prefix_lm=True.' ) # Raise a not implemented error if input_embeds is not None (this is an arg in huggingface transformers and we need to support it for PEFT) if inputs_embeds is not None: raise NotImplementedError( 'inputs_embeds is not implemented for MPT.') if self.training: if self.attn_uses_sequence_id and sequence_id is None: raise ValueError( 'sequence_id is a required argument when MPT is configured with attn_uses_sequence_id=True ' + 'and the model is in train mode.') elif (self.attn_uses_sequence_id is False) and (sequence_id is not None): warnings.warn( 'MPT received non-None input for `sequence_id` but is configured with attn_uses_sequence_id=False. ' + 'This input will be ignored. If you want the model to use `sequence_id`, set attn_uses_sequence_id to True.' ) S = input_ids.size(1) assert ( S <= self.config.max_seq_len ), f'Cannot forward input with seq_len={S}, this model only supports seq_len<={self.config.max_seq_len}' tok_emb = self.wte(input_ids) # type: ignore if self.learned_pos_emb: past_position = 0 if past_key_values is not None: if len(past_key_values) != self.config.n_layers: raise ValueError( f'past_key_values must provide a past_key_value for each attention ' + f'layer in the network ({len(past_key_values)=}; {self.config.n_layers=}).' ) # For attn_impl: triton and flash the past key tensor spec is (batch, seq, dim). # For attn_impl: torch the past key tensor spec is (batch, heads, head_dim, seq). # Here we shift position embedding using the `seq` dim of the past key past_position = past_key_values[0][0].size(1) if self.attn_impl == 'torch': past_position = past_key_values[0][0].size(3) if S + past_position > self.config.max_seq_len: raise ValueError( f'Cannot forward input with past sequence length {past_position} and current sequence length ' + f'{S + 1}, this model only supports total sequence length <= {self.config.max_seq_len}.' 
) pos = torch.arange( past_position, S + past_position, dtype=torch.long, device=input_ids.device, ).unsqueeze(0) if attention_mask is not None: # adjust the position indices to account for padding tokens pos = torch.clamp( pos - torch.cumsum((~attention_mask).to(torch.int32), dim=1)[:, past_position:], min=0, ) pos_emb = self.wpe(pos) # type: ignore x = tok_emb + pos_emb else: # ALiBi and NoPE use this path (RoPE will also use this path if / when enabled) x = tok_emb if self.embedding_fraction == 1: x = self.emb_drop(x) # type: ignore else: # this implementation is proposed on page 7 of the GLM-130B paper https://arxiv.org/abs/2210.02414 x_shrunk = (x * self.embedding_fraction) + ( x.detach() * (1 - self.embedding_fraction)) assert isinstance(self.emb_drop, nn.Module) # pyright x = self.emb_drop(x_shrunk) attn_bias, attention_mask = self._attn_bias( device=x.device, dtype=torch.float32, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id, ) # initialize the past key values cache if it should be used if use_cache and past_key_values is None: past_key_values = [() for _ in range(self.config.n_layers) ] # type: ignore all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for b_idx, block in enumerate(self.blocks): # type: ignore if output_hidden_states: assert all_hidden_states is not None # pyright all_hidden_states = all_hidden_states + (x,) past_key_value = (past_key_values[b_idx] if past_key_values is not None else None) x, attn_weights, past_key_value = block( x, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=self.is_causal, ) if past_key_values is not None: past_key_values[b_idx] = past_key_value if output_attentions: assert all_self_attns is not None # pyright all_self_attns = all_self_attns + (attn_weights,) x = self.norm_f(x) # type: ignore # add hidden states from the last decoder layer if output_hidden_states: assert all_hidden_states is not None # pyright all_hidden_states = all_hidden_states + (x,) return BaseModelOutputWithPast( last_hidden_state=x, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, ) # Param Initialization, needed for device='meta' fast initialization def param_init_fn(self, module: nn.Module): init_fn_name = self.config.init_config['name'] MODEL_INIT_REGISTRY[init_fn_name]( module=module, n_layers=self.config.n_layers, d_model=self.config.d_model, **self.config.init_config, ) # FSDP Wrap function def fsdp_wrap_fn(self, module: nn.Module): return isinstance(module, MPTBlock) # Activation Checkpointing def activation_checkpointing_fn(self, module: nn.Module): return isinstance(module, MPTBlock) class MPTForCausalLM(MPTPreTrainedModel): def __init__(self, config: MPTConfig): super().__init__(config) if not config.tie_word_embeddings: raise ValueError( 'MPTForCausalLM only supports tied word embeddings') print(f'Instantiating an MPTForCausalLM model from {__file__}') self.transformer: MPTModel = MPTModel(config) for child in self.transformer.children(): if isinstance(child, torch.nn.ModuleList): continue if isinstance(child, torch.nn.Module): child._fsdp_wrap = True # enables scaling output logits; similar to a softmax "temperature" # PaLM paper uses scale 1/sqrt(config.d_model) self.logit_scale = None if config.logit_scale is not None: logit_scale = config.logit_scale if isinstance(logit_scale, str): if logit_scale == 'inv_sqrt_d_model': logit_scale = 1 / math.sqrt(config.d_model) else: raise 
ValueError( f"{logit_scale=} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'." ) self.logit_scale = logit_scale def get_input_embeddings(self): return self.transformer.wte def set_input_embeddings(self, value: Union[SharedEmbedding, nn.Embedding]): self.transformer.wte = value def get_output_embeddings(self): return self.transformer.wte def set_output_embeddings(self, new_embeddings: Union[SharedEmbedding, nn.Embedding]): self.transformer.wte = new_embeddings def set_decoder(self, decoder: MPTModel): self.transformer = decoder def get_decoder(self): return self.transformer def forward( self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]] = None, attention_mask: Optional[torch.ByteTensor] = None, prefix_mask: Optional[torch.ByteTensor] = None, sequence_id: Optional[torch.LongTensor] = None, labels: Optional[torch.LongTensor] = None, return_dict: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, use_cache: Optional[bool] = None, inputs_embeds: Optional[torch.FloatTensor] = None, ): return_dict = (return_dict if return_dict is not None else self.config.return_dict) use_cache = (use_cache if use_cache is not None else self.config.use_cache) # if input_embeds is not none, raise a not implemented error if inputs_embeds is not None: raise NotImplementedError( 'inputs_embeds has to be None (for hf/peft support).') # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.transformer( input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id, return_dict=return_dict, output_attentions=output_attentions, output_hidden_states=output_hidden_states, use_cache=use_cache, ) # move outputs to same device as weights for token embedding # needed to support HF `device_map` logits = self.transformer.wte( outputs.last_hidden_state.to(self.transformer.wte.weight.device), True, ) if self.logit_scale is not None: if self.logit_scale == 0: warnings.warn( f'Multiplying logits by {self.logit_scale=}. This will produce uniform (uninformative) outputs.' 
) logits *= self.logit_scale loss = None if labels is not None: _labels = torch.roll(labels, shifts=-1) _labels[:, -1] = -100 loss = F.cross_entropy( logits.view(-1, logits.size(-1)), _labels.to(logits.device).view(-1), ) return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) # Param Initialization, needed for device='meta' fast initialization def param_init_fn(self, module: nn.Module): init_fn_name = self.config.init_config['name'] MODEL_INIT_REGISTRY[init_fn_name]( module=module, n_layers=self.config.n_layers, d_model=self.config.d_model, **self.config.init_config, ) # FSDP Wrap function def fsdp_wrap_fn(self, module: nn.Module): return isinstance(module, MPTBlock) # Activation Checkpointing def activation_checkpointing_fn(self, module: nn.Module): return isinstance(module, MPTBlock) def prepare_inputs_for_generation( self, input_ids: torch.Tensor, past_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = None, inputs_embeds: Optional[torch.Tensor] = None, **kwargs: Any, ): if inputs_embeds is not None: raise NotImplementedError( 'inputs_embeds is not implemented for MPT yet') attention_mask = kwargs['attention_mask'].bool() if attention_mask[:, -1].sum() != attention_mask.shape[0]: raise NotImplementedError( 'MPT does not support generation with right padding.') if self.transformer.attn_uses_sequence_id and self.training: sequence_id = torch.zeros_like(input_ids[:1]) else: sequence_id = None if past_key_values is not None: input_ids = input_ids[:, -1].unsqueeze(-1) if self.transformer.prefix_lm: # Leverage a convenience of sequential generation! prefix_mask = torch.ones_like(attention_mask) # This requires that we're using the cache if kwargs.get('use_cache') == False: raise NotImplementedError( 'MPT with prefix_lm=True does not support use_cache=False.') else: prefix_mask = None return { 'input_ids': input_ids, 'attention_mask': attention_mask, 'prefix_mask': prefix_mask, 'sequence_id': sequence_id, 'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache', True), } @staticmethod def _reorder_cache(past_key_values: List[Tuple[torch.Tensor, torch.Tensor]], beam_idx: torch.LongTensor): """Used by HuggingFace generate when using beam search with kv-caching. See https://github.com/huggingface/transformers/blob/3ec7a47664ebe40c40f4b722f6bb1cd30c3821ec/src/transformers/models/gpt2/modeling_gpt2.py#L1122-L1133 for an example in transformers. 
""" reordered_past = [] for layer_past in past_key_values: reordered_past += [ tuple( past_state.index_select(0, beam_idx) for past_state in layer_past) ] return reordered_past class ComposerMPTCausalLM(HuggingFaceModel): def __init__( self, om_model_config: DictConfig, tokenizer: Optional[PreTrainedTokenizerBase] = None, ): resolved_om_model_config = om.to_container(om_model_config, resolve=True) hf_config = MPTConfig.from_dict(resolved_om_model_config) model = MPTForCausalLM(hf_config) train_metrics = [LanguageCrossEntropy(), LanguagePerplexity()] eval_metrics = [ LanguageCrossEntropy(), LanguagePerplexity(), InContextLearningLMAccuracy(), InContextLearningMultipleChoiceAccuracy(), InContextLearningQAAccuracy(), InContextLearningLMExpectedCalibrationError(), InContextLearningMCExpectedCalibrationError(), ] super().__init__( model=model, tokenizer=tokenizer, use_logits=True, metrics=train_metrics, eval_metrics=eval_metrics, shift_labels=True, allow_embedding_resizing=True, ) self.n_active_params = sum(p.numel() for p in self.parameters()) loss_fn_config = om_model_config.get('loss_fn', 'fused_crossentropy') if loss_fn_config == 'fused_crossentropy': try: if hf_config.verbose > 1: warnings.warn('Using Fused Cross Entropy Loss.') self.loss_fn = FusedCrossEntropyLoss(ignore_index=-100) except: raise ValueError( 'Fused Cross Entropy is not installed. Either (1) have a CUDA-compatible GPU ' + 'and `pip install .[gpu]` if installing from source or `pip install xentropy-cuda-lib@git+https://github.com/HazyResearch/[email protected]#subdirectory=csrc/xentropy` ' + 'if installing from pypi, or (2) set your config model.loss_fn=torch_crossentropy.' ) elif loss_fn_config == 'torch_crossentropy': self.loss_fn = nn.CrossEntropyLoss(ignore_index=-100) else: raise ValueError( f'Specified loss_fn={self.loss_fn} not recognized. `loss_fn` must be one of [`fused_crossentropy`, `torch_crossentropy`].' ) def get_targets(self, batch: Mapping): targets = torch.roll(batch['labels'], shifts=-1) targets[:, -1] = -100 return targets def forward(self, batch: MutableMapping): if self.model.transformer.prefix_lm:
add_bidirectional_mask_if_missing(batch)
12
2023-10-09 15:32:15+00:00
16k
jiangjiechen/auction-arena
auction_workflow.py
[ { "identifier": "Auctioneer", "path": "src/auctioneer_base.py", "snippet": "class Auctioneer(BaseModel):\n enable_discount: bool = False\n items: List[Item] = []\n cur_item: Item = None\n highest_bidder: Bidder = None\n highest_bid: int = -1\n bidding_history = defaultdict(list) # history about the bidding war of one item\n items_queue: List[Item] = [] # updates when a item is taken.\n auction_logs = defaultdict(list) # history about the bidding war of all items\n openai_cost = 0\n prev_round_max_bid: int = -1\n min_bid: int = 0\n fail_to_sell = False\n min_markup_pct = 0.1\n\n class Config:\n arbitrary_types_allowed = True\n \n def init_items(self, items: List[Item]):\n for item in items:\n # reset discounted price\n item.reset_price()\n self.items = items\n self.items_queue = items.copy()\n\n def summarize_items_info(self):\n desc = ''\n for item in self.items:\n desc += f\"- {item.get_desc()}\\n\"\n return desc.strip()\n \n def present_item(self):\n cur_item = self.items_queue.pop(0)\n self.cur_item = cur_item\n return cur_item\n \n def shuffle_items(self):\n random.shuffle(self.items)\n self.items_queue = self.items.copy()\n \n def record_bid(self, bid_info: dict, bid_round: int):\n '''\n Save the bidding history for each round, log the highest bidder and highest bidding\n '''\n # bid_info: {'bidder': xxx, 'bid': xxx, 'raw_msg': xxx}\n self.bidding_history[bid_round].append(bid_info)\n for hist in self.bidding_history[bid_round]:\n if hist['bid'] > 0:\n if self.highest_bid < hist['bid']:\n self.highest_bid = hist['bid']\n self.highest_bidder = hist['bidder']\n elif self.highest_bid == hist['bid']:\n # random if there's a tie\n self.highest_bidder = random.choice([self.highest_bidder, hist['bidder']])\n self.auction_logs[f\"{self.cur_item.get_desc()}\"].append(\n {'bidder': bid_info['bidder'], \n 'bid': bid_info['bid'], \n 'bid_round': bid_round})\n\n def _biddings_to_string(self, bid_round: int):\n '''\n Return a string that summarizes the bidding history in a round\n '''\n # bid_hist_text = '' if bid_round == 0 else f'- {self.highest_bidder}: ${self.highest_bid}\\n'\n bid_hist_text = ''\n for js in self.bidding_history[bid_round]:\n if js['bid'] < 0:\n bid_hist_text += f\"- {js['bidder']} withdrew\\n\"\n else:\n bid_hist_text += f\"- {js['bidder']}: ${js['bid']}\\n\"\n return bid_hist_text.strip()\n \n def all_bidding_history_to_string(self):\n bid_hist_text = ''\n for bid_round in self.bidding_history:\n bid_hist_text += f\"Round {bid_round}:\\n{self._biddings_to_string(bid_round)}\\n\\n\"\n return bid_hist_text.strip()\n\n def ask_for_bid(self, bid_round: int):\n '''\n Ask for bid, return the message to be sent to bidders\n '''\n if self.highest_bidder is None:\n if bid_round > 0:\n msg = f\"Seeing as we've had no takers at the initial price, we're going to lower the starting bid to ${self.cur_item.price} for {self.cur_item.name} to spark some interest! Do I have any takers?\"\n else:\n remaining_items = [self.cur_item.name] + [item.name for item in self.items_queue]\n msg = f\"Attention, bidders! {len(remaining_items)} item(s) left, they are: {', '.join(remaining_items)}.\\n\\nNow, please bid on {self.cur_item}. The starting price for bidding for {self.cur_item} is ${self.cur_item.price}. Anyone interested in this item?\"\n else:\n bidding_history = self._biddings_to_string(bid_round - 1)\n msg = f\"Thank you! 
This is the {p.ordinal(bid_round)} round of bidding for this item:\\n{bidding_history}\\n\\nNow we have ${self.highest_bid} from {self.highest_bidder.name} for {self.cur_item.name}. The minimum increase over this highest bid is ${int(self.cur_item.price * self.min_markup_pct)}. Do I have any advance on ${self.highest_bid}?\"\n return msg\n \n def ask_for_rebid(self, fail_msg: str, bid_price: int):\n return f\"Your bid of ${bid_price} failed, because {fail_msg}: You must reconsider your bid.\"\n\n def get_hammer_msg(self):\n if self.highest_bidder is None:\n return f\"Since no one bid on {self.cur_item.name}, we'll move on to the next item.\"\n else:\n return f\"Sold! {self.cur_item} to {self.highest_bidder} at ${self.highest_bid}! The true value for {self.cur_item} is ${self.cur_item.true_value}.\"# Thus {self.highest_bidder}'s profit by winning this item is ${self.cur_item.true_value - self.highest_bid}.\"\n\n def check_hammer(self, bid_round: int):\n # check if the item is sold\n self.fail_to_sell = False\n num_bid = self._num_bids_in_round(bid_round)\n\n # highest_bidder has already been updated in record_bid().\n # so when num_bid == 0 & highest_bidder is None, it means no one bid on this item\n if self.highest_bidder is None:\n if num_bid == 0:\n # failed to sell, as there is no highest bidder\n self.fail_to_sell = True\n if self.enable_discount and bid_round < 3:\n # lower the starting price by 50%. discoutn only applies to the first 3 rounds\n self.cur_item.lower_price(0.5)\n is_sold = False\n else:\n is_sold = True\n else:\n # won't happen\n raise ValueError(f\"highest_bidder is None but num_bid is {num_bid}\")\n else:\n if self.prev_round_max_bid < 0 and num_bid == 1:\n # only one bidder in the first round \n is_sold = True\n else:\n self.prev_round_max_bid = self.highest_bid\n is_sold = self._num_bids_in_round(bid_round) == 0\n return is_sold\n \n def _num_bids_in_round(self, bid_round: int):\n # check if there is no bid in the current round\n cnt = 0\n for hist in self.bidding_history[bid_round]:\n if hist['bid'] > 0:\n cnt += 1\n return cnt\n\n def hammer_fall(self):\n print(f'* Sold! 
{self.cur_item} (${self.cur_item.true_value}) goes to {self.highest_bidder} at ${self.highest_bid}.')\n self.auction_logs[f\"{self.cur_item.get_desc()}\"].append({\n 'bidder': self.highest_bidder, \n 'bid': f\"{self.highest_bid} (${self.cur_item.true_value})\", # no need for the first $, as it will be added in the self.log()\n 'bid_round': 'Hammer price (true value)'})\n self.cur_item = None\n self.highest_bidder = None\n self.highest_bid = -1\n self.bidding_history = defaultdict(list)\n self.prev_round_max_bid = -1\n self.fail_to_sell = False\n\n def end_auction(self):\n return len(self.items_queue) == 0\n \n def gather_all_status(self, bidders: List[Bidder]):\n status = {}\n for bidder in bidders:\n status[bidder.name] = {\n 'profit': bidder.profit, \n 'items_won': bidder.items_won\n }\n return status\n\n def parse_bid(self, text: str):\n prompt = PARSE_BID_INSTRUCTION.format(response=text)\n with get_openai_callback() as cb:\n llm = ChatOpenAI(model='gpt-3.5-turbo-0613', temperature=0)\n result = llm([HumanMessage(content=prompt)]).content\n self.openai_cost += cb.total_cost\n \n bid_number = re.findall(r'\\$?\\d+', result.replace(',', ''))\n # find number in the result\n if '-1' in result:\n return -1\n elif len(bid_number) > 0:\n return int(bid_number[-1].replace('$', ''))\n else:\n print('* Rebid:', text)\n return None\n\n def log(self, bidder_personal_reports: list = [], show_model_name=True):\n ''' example\n Apparatus H, starting at $1000.\n\n 1st bid:\n Bidder 1 (gpt-3.5-turbo-16k-0613): $1200\n Bidder 2 (gpt-3.5-turbo-16k-0613): $1100\n Bidder 3 (gpt-3.5-turbo-16k-0613): Withdrawn\n Bidder 4 (gpt-3.5-turbo-16k-0613): $1200\n \n 2nd bid:\n Bidder 1 (gpt-3.5-turbo-16k-0613): Withdrawn\n Bidder 2 (gpt-3.5-turbo-16k-0613): Withdrawn\n \n Hammer price:\n Bidder 4 (gpt-3.5-turbo-16k-0613): $1200\n '''\n markdown_output = \"## Auction Log\\n\\n\"\n for i, (item, bids) in enumerate(self.auction_logs.items()):\n markdown_output += f\"### {i+1}. 
{item}\\n\\n\"\n cur_bid_round = -1\n for i, bid in enumerate(bids):\n if bid['bid_round'] != cur_bid_round:\n cur_bid_round = bid['bid_round']\n if isinstance(bid['bid_round'], int):\n markdown_output += f\"\\n#### {p.ordinal(bid['bid_round']+1)} bid:\\n\\n\"\n else:\n markdown_output += f\"\\n#### {bid['bid_round']}:\\n\\n\"\n bid_price = f\"${bid['bid']}\" if bid['bid'] != -1 else 'Withdrew'\n if isinstance(bid['bidder'], Bidder) or isinstance(bid['bidder'], HumanBidder):\n if show_model_name:\n markdown_output += f\"* {bid['bidder']} ({bid['bidder'].model_name}): {bid_price}\\n\"\n else:\n markdown_output += f\"* {bid['bidder']}: {bid_price}\\n\"\n else:\n markdown_output += f\"* None bid\\n\"\n markdown_output += \"\\n\"\n \n if len(bidder_personal_reports) != 0:\n markdown_output += f\"\\n## Personal Report\"\n for report in bidder_personal_reports:\n markdown_output += f\"\\n\\n{report}\"\n return markdown_output.strip()\n \n def finish_auction(self):\n self.auction_logs = defaultdict(list)\n self.cur_item = None\n self.highest_bidder = None\n self.highest_bid = -1\n self.bidding_history = defaultdict(list)\n self.items_queue = []\n self.items = []\n self.prev_round_max_bid = -1\n self.fail_to_sell = False\n self.min_bid = 0" }, { "identifier": "Bidder", "path": "src/bidder_base.py", "snippet": "class Bidder(BaseModel):\n name: str\n model_name: str \n budget: int \n desire: str\n plan_strategy: str\n temperature: float = 0.7\n overestimate_percent: int = 10\n correct_belief: bool\n enable_learning: bool = False\n \n llm: BaseLanguageModel = None\n openai_cost = 0\n llm_token_count = 0\n \n verbose: bool = False\n auction_hash: str = ''\n\n system_message: str = ''\n original_budget: int = 0\n\n # working memory\n profit: int = 0\n cur_item_id = 0\n items: list = []\n dialogue_history: list = [] # for gradio UI display\n llm_prompt_history: list = [] # for tracking llm calling\n items_won = []\n bid_history: list = [] # history of the bidding of a single item\n plan_instruct: str = '' # instruction for planning\n cur_plan: str = '' # current plan\n status_quo: dict = {} # belief of budget and profit, self and others\n withdraw: bool = False # state of withdraw\n learnings: str = '' # learnings from previous biddings. 
If given, then use it to guide the rest of the auction.\n max_bid_cnt: int = 4 # Rule Bidder: maximum number of bids on one item (K = 1 starting bid + K-1 increase bid)\n rule_bid_cnt: int = 0 # Rule Bidder: count of bids on one item\n\n # belief tracking\n failed_bid_cnt: int = 0 # count of failed bids (overspending)\n total_bid_cnt: int = 0 # count of total bids\n self_belief_error_cnt: int = 0\n total_self_belief_cnt: int = 0\n other_belief_error_cnt: int = 0\n total_other_belief_cnt: int = 0\n \n engagement_count: int = 0\n budget_history = []\n profit_history = []\n budget_error_history = []\n profit_error_history = []\n win_bid_error_history = []\n engagement_history = defaultdict(int)\n all_bidders_status = {} # track others' profit\n changes_of_plan = []\n \n # not used\n input_box: str = None\n need_input = False\n semaphore = 0\n\n class Config:\n arbitrary_types_allowed = True\n\n def __repr__(self):\n return self.name\n\n def __str__(self):\n return self.name\n \n @classmethod\n def create(cls, **data):\n instance = cls(**data)\n instance._post_init()\n return instance\n\n def _post_init(self):\n self.original_budget = self.budget\n self.system_message = SYSTEM_MESSAGE.format(\n name=self.name,\n desire_desc=DESIRE_DESC[self.desire],\n )\n self._parse_llm()\n self.dialogue_history += [\n SystemMessage(content=self.system_message), \n AIMessage(content='')\n ]\n self.budget_history.append(self.budget)\n self.profit_history.append(self.profit)\n\n def _parse_llm(self):\n if 'gpt-' in self.model_name:\n self.llm = ChatOpenAI(model=self.model_name, temperature=self.temperature, max_retries=30, request_timeout=1200)\n elif 'claude' in self.model_name:\n self.llm = ChatAnthropic(model=self.model_name, temperature=self.temperature, default_request_timeout=1200)\n elif 'bison' in self.model_name:\n self.llm = ChatGooglePalm(model_name=f'models/{self.model_name}', temperature=self.temperature)\n elif 'rule' in self.model_name or 'human' in self.model_name:\n self.llm = None\n else:\n raise NotImplementedError(self.model_name)\n \n # def _rotate_openai_org(self):\n # # use two organizations to avoid rate limit\n # if os.environ.get('OPENAI_ORGANIZATION_1') and os.environ.get('OPENAI_ORGANIZATION_2'):\n # return random.choice([os.environ.get('OPENAI_ORGANIZATION_1'), os.environ.get('OPENAI_ORGANIZATION_2')])\n # else:\n # return None\n \n def _run_llm_standalone(self, messages: list):\n \n with get_openai_callback() as cb:\n for i in range(6):\n try:\n input_token_num = self.llm.get_num_tokens_from_messages(messages)\n if 'claude' in self.model_name: # anthropic's claude\n result = self.llm(messages, max_tokens_to_sample=2048)\n elif 'bison' in self.model_name: # google's palm-2\n max_tokens = min(max(3900 - input_token_num, 192), 2048)\n if isinstance(self.llm, ChatVertexAI):\n result = self.llm(messages, max_output_tokens=max_tokens)\n else:\n result = self.llm(messages)\n elif 'gpt' in self.model_name: # openai\n if 'gpt-3.5-turbo' in self.model_name and '16k' not in self.model_name:\n max_tokens = max(3900 - input_token_num, 192)\n else:\n # gpt-4\n # self.llm.openai_organization = self._rotate_openai_org()\n max_tokens = max(8000 - input_token_num, 192)\n result = self.llm(messages, max_tokens=max_tokens)\n elif 'llama' in self.model_name.lower():\n raise NotImplementedError\n else:\n raise NotImplementedError\n break\n except:\n print(f'Retrying for {self.model_name} ({i+1}/6), wait for {2**(i+1)} sec...')\n time.sleep(2**(i+1))\n self.openai_cost += cb.total_cost\n 
self.llm_token_count = self.llm.get_num_tokens_from_messages(messages)\n return result.content\n\n def _get_estimated_value(self, item):\n value = item.true_value * (1 + self.overestimate_percent / 100)\n return int(value)\n \n def _get_cur_item(self, key=None):\n if self.cur_item_id < len(self.items):\n if key is not None:\n return self.items[self.cur_item_id].__dict__[key]\n else:\n return self.items[self.cur_item_id]\n else:\n return 'no item left'\n \n def _get_next_item(self, key=None):\n if self.cur_item_id + 1 < len(self.items):\n if key is not None:\n return self.items[self.cur_item_id + 1].__dict__[key]\n else:\n return self.items[self.cur_item_id + 1]\n else:\n return 'no item left'\n \n def _get_remaining_items(self, as_str=False):\n remain_items = self.items[self.cur_item_id + 1:]\n if as_str:\n return ', '.join([item.name for item in remain_items])\n else:\n return remain_items\n \n def _get_items_value_str(self, items: List[Item]):\n if not isinstance(items, list):\n items = [items]\n items_info = ''\n for i, item in enumerate(items):\n estimated_value = self._get_estimated_value(item)\n _info = f\"{i+1}. {item}, starting price is ${item.price}. Your estimated value for this item is ${estimated_value}.\\n\"\n items_info += _info\n return items_info.strip()\n \n # ********** Main Instructions and Functions ********** #\n \n def learn_from_prev_auction(self, past_learnings, past_auction_log):\n if not self.enable_learning or 'rule' in self.model_name or 'human' in self.model_name:\n return ''\n \n instruct_learn = INSTRUCT_LEARNING_TEMPLATE.format(\n past_auction_log=past_auction_log,\n past_learnings=past_learnings)\n\n result = self._run_llm_standalone([HumanMessage(content=instruct_learn)])\n self.dialogue_history += [\n HumanMessage(content=instruct_learn),\n AIMessage(content=result),\n ]\n self.llm_prompt_history.append({\n 'messages': [{x.type: x.content} for x in [HumanMessage(content=instruct_learn)]],\n 'result': result,\n 'tag': 'learn_0'\n })\n \n self.learnings = '\\n'.join(extract_numbered_list(result))\n if self.learnings != '':\n self.system_message += f\"\\n\\nHere are your key learning points and practical tips from a previous auction. 
You can use them to guide this auction:\\n```\\n{self.learnings}\\n```\"\n \n if self.verbose:\n print(f\"Learn from previous auction: {self.name} ({self.model_name}).\")\n return result\n\n def _choose_items(self, budget, items: List[Item]):\n '''\n Choose items within budget for rule bidders.\n Cheap ones first if maximize_items, expensive ones first if maximize_profit.\n '''\n sorted_items = sorted(items, key=lambda x: self._get_estimated_value(x), \n reverse=self.desire == 'maximize_profit')\n \n chosen_items = []\n i = 0\n while budget >= 0 and i < len(sorted_items):\n item = sorted_items[i]\n if item.price <= budget:\n chosen_items.append(item)\n budget -= item.price\n i += 1\n \n return chosen_items\n \n def get_plan_instruct(self, items: List[Item]):\n self.items = items\n plan_instruct = INSTRUCT_PLAN_TEMPLATE.format(\n bidder_name=self.name, \n budget=self.budget, \n item_num=len(items), \n items_info=self._get_items_value_str(items), \n desire_desc=DESIRE_DESC[self.desire],\n learning_statement='' if not self.enable_learning else _LEARNING_STATEMENT\n )\n return plan_instruct\n \n def init_plan(self, plan_instruct: str):\n '''\n Plan for bidding with auctioneer's instruction and items information for customize estimated value.\n plan = plan(system_message, instruct_plan)\n '''\n if 'rule' in self.model_name: \n # self.cur_plan = ', '.join([x.name for x in self._choose_items(self.budget, self.items)])\n # self.dialogue_history += [\n # HumanMessage(content=plan_instruct),\n # AIMessage(content=self.cur_plan),\n # ]\n # return self.cur_plan\n return ''\n\n self.status_quo = {\n 'remaining_budget': self.budget,\n 'total_profits': {bidder: 0 for bidder in self.all_bidders_status.keys()},\n 'winning_bids': {bidder: {} for bidder in self.all_bidders_status.keys()},\n }\n\n if self.plan_strategy == 'none':\n self.plan_instruct = ''\n self.cur_plan = ''\n return None\n\n system_msg = SystemMessage(content=self.system_message)\n plan_msg = HumanMessage(content=plan_instruct)\n messages = [system_msg, plan_msg]\n result = self._run_llm_standalone(messages)\n \n if self.verbose:\n print(get_colored_text(plan_msg.content, 'red'))\n print(get_colored_text(result, 'green'))\n \n self.dialogue_history += [\n plan_msg,\n AIMessage(content=result),\n ]\n self.llm_prompt_history.append({\n 'messages': [{x.type: x.content} for x in messages],\n 'result': result,\n 'tag': 'plan_0'\n })\n self.cur_plan = result\n self.plan_instruct = plan_instruct\n \n self.changes_of_plan.append([\n f\"{self.cur_item_id} (Initial)\", \n False, \n json.dumps(extract_jsons_from_text(result)[-1]),\n ])\n \n if self.verbose:\n print(f\"Plan: {self.name} ({self.model_name}) for {self._get_cur_item()}.\")\n return result\n \n def get_rebid_instruct(self, auctioneer_msg: str):\n self.dialogue_history += [\n HumanMessage(content=auctioneer_msg),\n AIMessage(content='')\n ]\n return auctioneer_msg\n\n def get_bid_instruct(self, auctioneer_msg: str, bid_round: int):\n auctioneer_msg = auctioneer_msg.replace(self.name, f'You ({self.name})')\n \n bid_instruct = INSTRUCT_BID_TEMPLATE.format(\n auctioneer_msg=auctioneer_msg, \n bidder_name=self.name,\n cur_item=self._get_cur_item(),\n estimated_value=self._get_estimated_value(self._get_cur_item()),\n desire_desc=DESIRE_DESC[self.desire],\n learning_statement='' if not self.enable_learning else _LEARNING_STATEMENT\n )\n if bid_round == 0:\n if self.plan_strategy in ['static', 'none']:\n # if static planner, then no replanning is needed. status quo is updated in replanning. 
thus need to add status quo in bid instruct.\n bid_instruct = f\"\"\"The status quo of this auction so far is:\\n\"{json.dumps(self.status_quo, indent=4)}\"\\n\\n{bid_instruct}\\n---\\n\"\"\"\n else:\n bid_instruct = f'Now, the auctioneer says: \"{auctioneer_msg}\"'\n \n self.dialogue_history += [\n HumanMessage(content=bid_instruct),\n AIMessage(content='')\n ]\n return bid_instruct\n \n def bid_rule(self, cur_bid: int, min_markup_pct: float = 0.1):\n '''\n :param cur_bid: current highest bid\n :param min_markup_pct: minimum percentage for bid increase\n :param max_bid_cnt: maximum number of bids on one item (K = 1 starting bid + K-1 increase bid)\n '''\n # dialogue history already got bid_instruction.\n cur_item = self._get_cur_item()\n \n if cur_bid <= 0:\n next_bid = cur_item.price\n else:\n next_bid = cur_bid + min_markup_pct * cur_item.price\n \n if self.budget - next_bid >= 0 and self.rule_bid_cnt < self.max_bid_cnt:\n msg = int(next_bid)\n self.rule_bid_cnt += 1\n else:\n msg = -1\n \n content = f'The current highest bid for {cur_item.name} is ${cur_bid}. '\n content += \"I'm out!\" if msg < 0 else f\"I bid ${msg}! (Rule generated)\"\n self.dialogue_history += [\n HumanMessage(content=''),\n AIMessage(content=content)\n ]\n \n return msg\n \n def bid(self, bid_instruct):\n '''\n Bid for an item with auctioneer's instruction and bidding history.\n bid_history = bid(system_message, instruct_plan, plan, bid_history)\n '''\n if self.model_name == 'rule':\n return ''\n \n bid_msg = HumanMessage(content=bid_instruct)\n \n if self.plan_strategy == 'none':\n messages = [SystemMessage(content=self.system_message)]\n else:\n messages = [SystemMessage(content=self.system_message),\n HumanMessage(content=self.plan_instruct),\n AIMessage(content=self.cur_plan)]\n \n self.bid_history += [bid_msg]\n messages += self.bid_history\n \n result = self._run_llm_standalone(messages)\n \n self.bid_history += [AIMessage(content=result)]\n\n self.dialogue_history += [\n HumanMessage(content=''),\n AIMessage(content=result)\n ]\n \n self.llm_prompt_history.append({\n 'messages': [{x.type: x.content} for x in messages],\n 'result': result,\n 'tag': f'bid_{self.cur_item_id}'\n })\n \n if self.verbose:\n print(get_colored_text(bid_instruct, 'yellow'))\n print(get_colored_text(result, 'green'))\n \n print(f\"Bid: {self.name} ({self.model_name}) for {self._get_cur_item()}.\")\n self.total_bid_cnt += 1\n \n return result\n\n def get_summarize_instruct(self, bidding_history: str, hammer_msg: str, win_lose_msg: str):\n instruct = INSTRUCT_SUMMARIZE_TEMPLATE.format(\n cur_item=self._get_cur_item(), \n bidding_history=bidding_history, \n hammer_msg=hammer_msg.strip(), \n win_lose_msg=win_lose_msg.strip(), \n bidder_name=self.name,\n prev_status=self._status_json_to_text(self.status_quo),\n )\n return instruct\n\n def summarize(self, instruct_summarize: str):\n '''\n Update belief/status quo\n status_quo = summarize(system_message, bid_history, prev_status + instruct_summarize)\n '''\n self.budget_history.append(self.budget)\n self.profit_history.append(self.profit)\n \n if self.model_name == 'rule': \n self.rule_bid_cnt = 0 # reset bid count for rule bidder\n return ''\n \n messages = [SystemMessage(content=self.system_message)]\n # messages += self.bid_history\n summ_msg = HumanMessage(content=instruct_summarize)\n messages.append(summ_msg)\n\n status_quo_text = self._run_llm_standalone(messages)\n \n self.dialogue_history += [summ_msg, AIMessage(content=status_quo_text)]\n self.bid_history += [summ_msg, 
AIMessage(content=status_quo_text)]\n \n self.llm_prompt_history.append({\n 'messages': [{x.type: x.content} for x in messages],\n 'result': status_quo_text,\n 'tag': f'summarize_{self.cur_item_id}'\n })\n\n cnt = 0\n while cnt <= 3:\n sanity_msg = self._sanity_check_status_json(extract_jsons_from_text(status_quo_text)[-1])\n if sanity_msg == '':\n # pass sanity check then track beliefs\n consistency_msg = self._belief_tracking(status_quo_text)\n else:\n sanity_msg = f'- {sanity_msg}'\n consistency_msg = ''\n \n if sanity_msg != '' or (consistency_msg != '' and self.correct_belief):\n err_msg = f\"As {self.name}, here are some error(s) of your summary of the status JSON:\\n{sanity_msg.strip()}\\n{consistency_msg.strip()}\\n\\nPlease revise the status JSON based on the errors. Don't apologize. Just give me the revised status JSON.\".strip()\n \n # print(f\"{self.name}: revising status quo for the {cnt} time:\")\n # print(get_colored_text(err_msg, 'green'))\n # print(get_colored_text(status_quo_text, 'red'))\n \n messages += [AIMessage(content=status_quo_text), \n HumanMessage(content=err_msg)]\n status_quo_text = self._run_llm_standalone(messages)\n self.dialogue_history += [\n HumanMessage(content=err_msg),\n AIMessage(content=status_quo_text),\n ]\n cnt += 1\n else:\n break\n \n self.status_quo = extract_jsons_from_text(status_quo_text)[-1]\n\n if self.verbose:\n print(get_colored_text(instruct_summarize, 'blue'))\n print(get_colored_text(status_quo_text, 'green'))\n \n print(f\"Summarize: {self.name} ({self.model_name}) for {self._get_cur_item()}.\")\n \n return status_quo_text\n \n def get_replan_instruct(self):\n instruct = INSTRUCT_REPLAN_TEMPLATE.format(\n status_quo=self._status_json_to_text(self.status_quo),\n remaining_items_info=self._get_items_value_str(self._get_remaining_items()),\n bidder_name=self.name,\n desire_desc=DESIRE_DESC[self.desire],\n learning_statement='' if not self.enable_learning else _LEARNING_STATEMENT\n )\n return instruct\n\n def replan(self, instruct_replan: str):\n '''\n plan = replan(system_message, instruct_plan, prev_plan, status_quo + (learning) + instruct_replan)\n '''\n if self.model_name == 'rule': \n self.withdraw = False\n self.cur_item_id += 1\n return ''\n \n if self.plan_strategy in ['none', 'static']:\n self.bid_history = [] # clear bid history\n self.cur_item_id += 1\n self.withdraw = False\n return 'Skip replanning for bidders with static or no plan.'\n \n replan_msg = HumanMessage(content=instruct_replan)\n \n messages = [SystemMessage(content=self.system_message),\n HumanMessage(content=self.plan_instruct),\n AIMessage(content=self.cur_plan)]\n messages.append(replan_msg)\n\n result = self._run_llm_standalone(messages)\n \n new_plan_dict = extract_jsons_from_text(result)[-1]\n cnt = 0\n while len(new_plan_dict) == 0 and cnt < 2:\n err_msg = 'Your response does not contain a JSON-format priority list for items. 
Please revise your plan.'\n messages += [\n AIMessage(content=result),\n HumanMessage(content=err_msg),\n ]\n result = self._run_llm_standalone(messages)\n new_plan_dict = extract_jsons_from_text(result)[-1]\n \n self.dialogue_history += [\n HumanMessage(content=err_msg),\n AIMessage(content=result),\n ]\n cnt += 1\n \n old_plan_dict = extract_jsons_from_text(self.cur_plan)[-1]\n self.changes_of_plan.append([\n f\"{self.cur_item_id + 1} ({self._get_cur_item('name')})\", \n self._change_of_plan(old_plan_dict, new_plan_dict),\n json.dumps(new_plan_dict)\n ])\n \n self.plan_instruct = instruct_replan\n self.cur_plan = result\n self.withdraw = False\n self.bid_history = [] # clear bid history\n self.cur_item_id += 1\n\n self.dialogue_history += [\n replan_msg,\n AIMessage(content=result),\n ]\n self.llm_prompt_history.append({\n 'messages': [{x.type: x.content} for x in messages],\n 'result': result,\n 'tag': f'plan_{self.cur_item_id}'\n })\n \n if self.verbose:\n print(get_colored_text(instruct_replan, 'blue'))\n print(get_colored_text(result, 'green'))\n\n print(f\"Replan: {self.name} ({self.model_name}).\")\n return result\n \n def _change_of_plan(self, old_plan: dict, new_plan: dict):\n for k in new_plan:\n if new_plan[k] != old_plan.get(k, None):\n return True\n return False\n \n # *********** Belief Tracking and Sanity Check *********** #\n \n def bid_sanity_check(self, bid_price, prev_round_max_bid, min_markup_pct):\n # can't bid more than budget or less than previous highest bid\n if bid_price < 0:\n msg = None\n else:\n min_bid_increase = int(min_markup_pct * self._get_cur_item('price'))\n if bid_price > self.budget:\n msg = f\"you don't have sufficient budget (${self.budget} left)\"\n elif bid_price < self._get_cur_item('price'):\n msg = f\"your bid is lower than the starting bid (${self._get_cur_item('price')})\"\n elif bid_price < prev_round_max_bid + min_bid_increase:\n msg = f\"you must advance the previous highest bid (${prev_round_max_bid}) by at least ${min_bid_increase} ({int(100 * min_markup_pct)}%).\"\n else:\n msg = None\n return msg\n\n def rebid_for_failure(self, fail_instruct: str):\n result = self.bid(fail_instruct)\n self.failed_bid_cnt += 1\n return result\n \n def _sanity_check_status_json(self, data: dict):\n if data == {}:\n return \"Error: No parsable JSON in your response. 
Possibly due to missing a closing curly bracket '}', or unparsable values (e.g., 'profit': 1000 + 400, instead of 'profit': 1400).\"\n\n # Check if all expected top-level keys are present\n expected_keys = [\"remaining_budget\", \"total_profits\", \"winning_bids\"]\n for key in expected_keys:\n if key not in data:\n return f\"Error: Missing '{key}' field in the status JSON.\"\n\n # Check if \"remaining_budget\" is a number\n if not isinstance(data[\"remaining_budget\"], (int, float)):\n return \"Error: 'remaining_budget' should be a number, and only about your remaining budget.\"\n\n # Check if \"total_profits\" is a dictionary with numbers as values\n if not isinstance(data[\"total_profits\"], dict):\n return \"Error: 'total_profits' should be a dictionary of every bidder.\"\n for bidder, profit in data[\"total_profits\"].items():\n if not isinstance(profit, (int, float)):\n return f\"Error: Profit for {bidder} should be a number.\"\n\n # Check if \"winning_bids\" is a dictionary and that each bidder's entry is a dictionary with numbers\n if not isinstance(data[\"winning_bids\"], dict):\n return \"Error: 'winning_bids' should be a dictionary.\"\n for bidder, bids in data[\"winning_bids\"].items():\n if not isinstance(bids, dict):\n return f\"Error: Bids for {bidder} should be a dictionary.\"\n for item, amount in bids.items():\n if not isinstance(amount, (int, float)):\n return f\"Error: Amount for {item} under {bidder} should be a number.\"\n\n # If everything is fine\n return \"\"\n \n def _status_json_to_text(self, data: dict):\n if 'rule' in self.model_name: return ''\n \n # Extract and format remaining budget\n structured_text = f\"* Remaining Budget: ${data.get('remaining_budget', 'unknown')}\\n\\n\"\n \n # Extract and format total profits for each bidder\n structured_text += \"* Total Profits:\\n\"\n if data.get('total_profits'):\n for bidder, profit in data['total_profits'].items():\n structured_text += f\" * {bidder}: ${profit}\\n\"\n \n # Extract and list the winning bids for each item by each bidder\n structured_text += \"\\n* Winning Bids:\\n\"\n if data.get('winning_bids'):\n for bidder, bids in data['winning_bids'].items():\n structured_text += f\" * {bidder}:\\n\"\n if bids:\n for item, amount in bids.items():\n structured_text += f\" * {item}: ${amount}\\n\"\n else:\n structured_text += f\" * No winning bids\\n\"\n \n return structured_text.strip()\n\n def _belief_tracking(self, status_text: str):\n '''\n Parse status quo and check if the belief is correct.\n '''\n belief_json = extract_jsons_from_text(status_text)[-1]\n # {\"remaining_budget\": 8000, \"total_profits\": {\"Bidder 1\": 1300, \"Bidder 2\": 1800, \"Bidder 3\": 0}, \"winning_bids\": {\"Bidder 1\": {\"Item 2\": 1200, \"Item 3\": 1000}, \"Bidder 2\": {\"Item 1\": 2000}, \"Bidder 3\": {}}}\n budget_belief = belief_json['remaining_budget']\n profits_belief = belief_json['total_profits']\n winning_bids = belief_json['winning_bids']\n\n msg = ''\n # track belief of budget\n self.total_self_belief_cnt += 1\n if budget_belief != self.budget:\n msg += f'- Your belief of budget is wrong: you have ${self.budget} left, but you think you have ${budget_belief} left.\\n'\n self.self_belief_error_cnt += 1\n self.budget_error_history.append([\n self._get_cur_item('name'),\n budget_belief,\n self.budget,\n ])\n \n # track belief of profits\n for bidder_name, profit in profits_belief.items():\n if self.all_bidders_status.get(bidder_name) is None:\n # due to a potentially unreasonable parsing\n continue\n \n if self.name in 
bidder_name: \n bidder_name = self.name\n self.total_self_belief_cnt += 1\n else:\n self.total_other_belief_cnt += 1\n \n real_profit = self.all_bidders_status[bidder_name]['profit']\n \n if profit != real_profit:\n if self.name == bidder_name:\n self.self_belief_error_cnt += 1\n else:\n self.other_belief_error_cnt += 1\n\n msg += f'- Your belief of total profit of {bidder_name} is wrong: {bidder_name} has earned ${real_profit} so far, but you think {bidder_name} has earned ${profit}.\\n'\n\n # add to history\n self.profit_error_history.append([\n f\"{bidder_name} ({self._get_cur_item('name')})\",\n profit,\n real_profit\n ])\n\n # track belief of winning bids\n for bidder_name, items_won_dict in winning_bids.items():\n if self.all_bidders_status.get(bidder_name) is None:\n # due to a potentially unreasonable parsing\n continue\n\n real_items_won = self.all_bidders_status[bidder_name]['items_won']\n # items_won = [(item, bid_price), ...)]\n \n items_won_list = list(items_won_dict.keys())\n real_items_won_list = [str(x) for x, _ in real_items_won]\n \n if self.name in bidder_name:\n self.total_self_belief_cnt += 1\n else:\n self.total_other_belief_cnt += 1\n \n if not item_list_equal(items_won_list, real_items_won_list):\n if bidder_name == self.name:\n self.self_belief_error_cnt += 1\n _bidder_name = f'you'\n else:\n self.other_belief_error_cnt += 1\n _bidder_name = bidder_name\n \n msg += f\"- Your belief of winning items of {bidder_name} is wrong: {bidder_name} won {real_items_won}, but you think {bidder_name} won {items_won_dict}.\\n\"\n\n self.win_bid_error_history.append([\n f\"{_bidder_name} ({self._get_cur_item('name')})\",\n ', '.join(items_won_list),\n ', '.join(real_items_won_list)\n ])\n \n return msg\n \n def win_bid(self, item: Item, bid: int):\n self.budget -= bid\n self.profit += item.true_value - bid\n self.items_won += [[item, bid]]\n msg = f\"Congratulations! You won {item} at ${bid}.\"# Now you have ${self.budget} left. Your total profit so far is ${self.profit}.\"\n return msg\n \n def lose_bid(self, item: Item):\n return f\"You lost {item}.\"# Now, you have ${self.budget} left. 
Your total profit so far is ${self.profit}.\"\n \n # set the profit information of other bidders\n def set_all_bidders_status(self, all_bidders_status: dict):\n self.all_bidders_status = all_bidders_status.copy()\n\n def set_withdraw(self, bid: int):\n if bid < 0: # withdraw\n self.withdraw = True\n elif bid == 0: # enable discount and bid again\n self.withdraw = False\n else: # normal bid\n self.withdraw = False\n self.engagement_count += 1\n self.engagement_history[self._get_cur_item('name')] += 1\n \n # ****************** Logging ****************** #\n \n # def _parse_hedging(self, plan: str): # deprecated\n # prompt = PARSE_HEDGE_INSTRUCTION.format(\n # item_name=self._get_cur_item(), \n # plan=plan)\n \n # with get_openai_callback() as cb:\n # llm = ChatOpenAI(model='gpt-3.5-turbo-0613', temperature=0)\n # result = llm([HumanMessage(content=prompt)]).content\n # self.openai_cost += cb.total_cost\n # # parse a number, which could be a digit\n # hedge_percent = re.findall(r'\\d+\\.?\\d*%', result)\n # if len(hedge_percent) > 0:\n # hedge_percent = hedge_percent[0].replace('%', '')\n # else:\n # hedge_percent = 0\n # return float(hedge_percent)\n \n def profit_report(self):\n '''\n Personal profit report at the end of an auction.\n '''\n msg = f\"* {self.name}, starting with ${self.original_budget}, has won {len(self.items_won)} items in this auction, with a total profit of ${self.profit}.:\\n\"\n profit = 0\n for item, bid in self.items_won:\n profit += item.true_value - bid\n msg += f\" * Won {item} at ${bid} over ${item.price}, with a true value of ${item.true_value}.\\n\"\n return msg.strip()\n \n def to_monitors(self, as_json=False):\n # budget, profit, items_won, tokens\n if len(self.items_won) == 0 and not as_json: \n items_won = [['', 0, 0]]\n else:\n items_won = []\n for item, bid in self.items_won:\n items_won.append([str(item), bid, item.true_value])\n \n profit_error_history = self.profit_error_history if self.profit_error_history != [] or as_json else [['', '', '']]\n win_bid_error_history = self.win_bid_error_history if self.win_bid_error_history != [] or as_json else [['', '', '']]\n budget_error_history = self.budget_error_history if self.budget_error_history != [] or as_json else [['', '']]\n changes_of_plan = self.changes_of_plan if self.changes_of_plan != [] or as_json else [['', '', '']]\n \n if as_json:\n return {\n 'auction_hash': self.auction_hash,\n 'bidder_name': self.name,\n 'model_name': self.model_name,\n 'desire': self.desire,\n 'plan_strategy': self.plan_strategy,\n 'overestimate_percent': self.overestimate_percent,\n 'temperature': self.temperature,\n 'correct_belief': self.correct_belief,\n 'enable_learning': self.enable_learning,\n 'budget': self.original_budget,\n 'money_left': self.budget,\n 'profit': self.profit,\n 'items_won': items_won,\n 'tokens_used': self.llm_token_count,\n 'openai_cost': round(self.openai_cost, 2),\n 'failed_bid_cnt': self.failed_bid_cnt,\n 'self_belief_error_cnt': self.self_belief_error_cnt,\n 'other_belief_error_cnt': self.other_belief_error_cnt,\n 'failed_bid_rate': round(self.failed_bid_cnt / (self.total_bid_cnt+1e-8), 2),\n 'self_error_rate': round(self.self_belief_error_cnt / (self.total_self_belief_cnt+1e-8), 2),\n 'other_error_rate': round(self.other_belief_error_cnt / (self.total_other_belief_cnt+1e-8), 2),\n 'engagement_count': self.engagement_count,\n 'engagement_history': self.engagement_history,\n 'changes_of_plan': changes_of_plan,\n 'budget_error_history': budget_error_history,\n 'profit_error_history': 
profit_error_history,\n 'win_bid_error_history': win_bid_error_history,\n 'history': self.llm_prompt_history\n }\n else:\n return [\n self.budget, \n self.profit, \n items_won, \n self.llm_token_count, \n round(self.openai_cost, 2), \n round(self.failed_bid_cnt / (self.total_bid_cnt+1e-8), 2), \n round(self.self_belief_error_cnt / (self.total_self_belief_cnt+1e-8), 2), \n round(self.other_belief_error_cnt / (self.total_other_belief_cnt+1e-8), 2), \n self.engagement_count,\n draw_plot(f\"{self.name} ({self.model_name})\", self.budget_history, self.profit_history), \n changes_of_plan,\n budget_error_history,\n profit_error_history, \n win_bid_error_history\n ]\n\n def dialogue_to_chatbot(self):\n # chatbot: [[Human, AI], [], ...]\n # only dialogue will be sent to LLMs. chatbot is just for display.\n assert len(self.dialogue_history) % 2 == 0\n chatbot = []\n for i in range(0, len(self.dialogue_history), 2):\n # if exceeds the length of dialogue, append the last message\n human_msg = self.dialogue_history[i].content\n ai_msg = self.dialogue_history[i+1].content\n if ai_msg == '': ai_msg = None\n if human_msg == '': human_msg = None\n chatbot.append([human_msg, ai_msg])\n return chatbot" }, { "identifier": "bidders_to_chatbots", "path": "src/bidder_base.py", "snippet": "def bidders_to_chatbots(bidder_list: List[Bidder], profit_report=False):\n if profit_report: # usually at the end of an auction\n return [x.dialogue_to_chatbot() + [[x.profit_report(), None]] for x in bidder_list]\n else:\n return [x.dialogue_to_chatbot() for x in bidder_list]" }, { "identifier": "bidding_multithread", "path": "src/bidder_base.py", "snippet": "def bidding_multithread(bidder_list: List[Bidder], \n instruction_list, \n func_type,\n thread_num=5,\n retry=1):\n '''\n auctioneer_msg: either a uniform message (str) or customed (list)\n '''\n assert func_type in ['plan', 'bid', 'summarize', 'replan']\n \n result_queue = queue.Queue()\n threads = []\n semaphore = threading.Semaphore(thread_num)\n\n def run_once(i: int, bidder: Bidder, auctioneer_msg: str):\n try:\n semaphore.acquire()\n if func_type == 'bid':\n \n result = bidder.bid(auctioneer_msg)\n elif func_type == 'summarize':\n result = bidder.summarize(auctioneer_msg)\n elif func_type == 'plan':\n result = bidder.init_plan(auctioneer_msg)\n elif func_type == 'replan':\n result = bidder.replan(auctioneer_msg)\n else:\n raise NotImplementedError(f'func_type {func_type} not implemented')\n result_queue.put((True, i, result))\n # except Exception as e:\n # result_queue.put((False, i, str(trace_back(e))))\n finally:\n semaphore.release()\n\n if isinstance(instruction_list, str):\n instruction_list = [instruction_list] * len(bidder_list)\n \n for i, (bidder, msg) in enumerate(zip(bidder_list, instruction_list)):\n thread = threading.Thread(target=run_once, args=(i, bidder, msg))\n thread.start()\n threads.append(thread)\n \n for thread in threads:\n thread.join(timeout=600)\n \n results = [result_queue.get() for _ in range(len(bidder_list))]\n \n errors = []\n for success, id, result in results:\n if not success:\n errors.append((id, result))\n \n if errors:\n raise Exception(f\"Error(s) in {func_type}:\\n\" + '\\n'.join([f'{i}: {e}' for i, e in errors]))\n \n valid_results = [x[1:] for x in results if x[0]]\n valid_results.sort()\n \n return [x for _, x in valid_results]" }, { "identifier": "trace_back", "path": "utils.py", "snippet": "def trace_back(error_msg):\n exc = traceback.format_exc()\n msg = f'[Error]: {error_msg}.\\n[Traceback]: {exc}'\n return msg" } ]
import os import time import gradio as gr import ujson as json import traceback import argparse from typing import List from tqdm import tqdm from src.auctioneer_base import Auctioneer from src.bidder_base import Bidder, bidders_to_chatbots, bidding_multithread from utils import trace_back from src.item_base import create_items from src.bidder_base import create_bidders from transformers import GPT2TokenizerFast
12,741
LOG_DIR = 'logs' enable_gr = gr.update(interactive=True) disable_gr = gr.update(interactive=False) def monitor_all(bidder_list: List[Bidder]): return sum([bidder.to_monitors() for bidder in bidder_list], [])
LOG_DIR = 'logs' enable_gr = gr.update(interactive=True) disable_gr = gr.update(interactive=False) def monitor_all(bidder_list: List[Bidder]): return sum([bidder.to_monitors() for bidder in bidder_list], [])
def parse_bid_price(auctioneer: Auctioneer, bidder: Bidder, msg: str):
0
2023-10-08 09:30:57+00:00
16k
SH1ROd/Bert-VITS2-Integration-train-txt-infer
train_ms.py
[ { "identifier": "TextAudioSpeakerLoader", "path": "data_utils.py", "snippet": "class TextAudioSpeakerLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio, speaker_id, text pairs\n 2) normalizes text and converts them to sequences of integers\n 3) computes spectrograms from audio files.\n \"\"\"\n\n def __init__(self, audiopaths_sid_text, hparams):\n self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)\n self.max_wav_value = hparams.max_wav_value\n self.sampling_rate = hparams.sampling_rate\n self.filter_length = hparams.filter_length\n self.hop_length = hparams.hop_length\n self.win_length = hparams.win_length\n self.sampling_rate = hparams.sampling_rate\n self.spk_map = hparams.spk2id\n self.hparams = hparams\n\n self.use_mel_spec_posterior = getattr(hparams, \"use_mel_posterior_encoder\", False)\n if self.use_mel_spec_posterior:\n self.n_mel_channels = getattr(hparams, \"n_mel_channels\", 80)\n\n self.cleaned_text = getattr(hparams, \"cleaned_text\", False)\n\n self.add_blank = hparams.add_blank\n self.min_text_len = getattr(hparams, \"min_text_len\", 1)\n self.max_text_len = getattr(hparams, \"max_text_len\", 300)\n\n random.seed(1234)\n random.shuffle(self.audiopaths_sid_text)\n self._filter()\n\n def _filter(self):\n \"\"\"\n Filter text & store spec lengths\n \"\"\"\n # Store spectrogram lengths for Bucketing\n # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)\n # spec_length = wav_length // hop_length\n\n audiopaths_sid_text_new = []\n lengths = []\n skipped = 0\n for _id, spk, language, text, phones, tone, word2ph in self.audiopaths_sid_text:\n audiopath = f'{_id}'\n if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len:\n phones = phones.split(\" \")\n tone = [int(i) for i in tone.split(\" \")]\n word2ph = [int(i) for i in word2ph.split(\" \")]\n audiopaths_sid_text_new.append([audiopath, spk, language, text, phones, tone, word2ph])\n lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))\n else:\n skipped += 1\n print(\"skipped: \", skipped, \", total: \", len(self.audiopaths_sid_text))\n self.audiopaths_sid_text = audiopaths_sid_text_new\n self.lengths = lengths\n\n def get_audio_text_speaker_pair(self, audiopath_sid_text):\n # separate filename, speaker_id and text\n audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text\n\n bert, phones, tone, language = self.get_text(text, word2ph, phones, tone, language, audiopath)\n\n spec, wav = self.get_audio(audiopath)\n sid = torch.LongTensor([int(self.spk_map[sid])])\n return (phones, spec, wav, sid, tone, language, bert)\n\n def get_audio(self, filename):\n audio_norm, sampling_rate = torchaudio.load(filename, frame_offset=0, num_frames=-1, normalize=True, channels_first=True)\n '''\n audio, sampling_rate = load_wav_to_torch(filename)\n if sampling_rate != self.sampling_rate:\n raise ValueError(\"{} {} SR doesn't match target {} SR\".format(\n sampling_rate, self.sampling_rate))\n audio_norm = audio / self.max_wav_value\n audio_norm = audio_norm.unsqueeze(0)\n '''\n spec_filename = filename.replace(\".wav\", \".spec.pt\")\n if self.use_mel_spec_posterior:\n spec_filename = spec_filename.replace(\".spec.pt\", \".mel.pt\")\n if os.path.exists(spec_filename):\n spec = torch.load(spec_filename)\n else:\n if self.use_mel_spec_posterior:\n # if os.path.exists(filename.replace(\".wav\", \".spec.pt\")):\n # # spec, n_fft, num_mels, sampling_rate, fmin, fmax\n # spec = spec_to_mel_torch(\n # torch.load(filename.replace(\".wav\", 
\".spec.pt\")), \n # self.filter_length, self.n_mel_channels, self.sampling_rate,\n # self.hparams.mel_fmin, self.hparams.mel_fmax)\n spec = mel_spectrogram_torch(audio_norm, self.filter_length,\n self.n_mel_channels, self.sampling_rate, self.hop_length,\n self.win_length, self.hparams.mel_fmin, self.hparams.mel_fmax, center=False)\n else:\n spec = spectrogram_torch(audio_norm, self.filter_length,\n self.sampling_rate, self.hop_length, self.win_length,\n center=False)\n spec = torch.squeeze(spec, 0)\n torch.save(spec, spec_filename)\n return spec, audio_norm\n\n def get_text(self, text, word2ph, phone, tone, language_str, wav_path):\n # print(text, word2ph,phone, tone, language_str)\n pold = phone\n w2pho = [i for i in word2ph]\n word2ph = [i for i in word2ph]\n phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)\n pold2 = phone\n\n if self.add_blank:\n p1 = len(phone)\n phone = commons.intersperse(phone, 0)\n p2 = len(phone)\n t1 = len(tone)\n tone = commons.intersperse(tone, 0)\n t2 = len(tone)\n language = commons.intersperse(language, 0)\n for i in range(len(word2ph)):\n word2ph[i] = word2ph[i] * 2\n word2ph[0] += 1\n bert_path = wav_path.replace(\".wav\", \".bert.pt\")\n try:\n bert = torch.load(bert_path)\n assert bert.shape[-1] == len(phone)\n except:\n bert = get_bert(text, word2ph, language_str)\n torch.save(bert, bert_path)\n #print(bert.shape[-1], bert_path, text, pold)\n assert bert.shape[-1] == len(phone)\n\n assert bert.shape[-1] == len(phone), (\n bert.shape, len(phone), sum(word2ph), p1, p2, t1, t2, pold, pold2, word2ph, text, w2pho)\n phone = torch.LongTensor(phone)\n tone = torch.LongTensor(tone)\n language = torch.LongTensor(language)\n return bert, phone, tone, language\n\n def get_sid(self, sid):\n sid = torch.LongTensor([int(sid)])\n return sid\n\n def __getitem__(self, index):\n return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])\n\n def __len__(self):\n return len(self.audiopaths_sid_text)" }, { "identifier": "TextAudioSpeakerCollate", "path": "data_utils.py", "snippet": "class TextAudioSpeakerCollate():\n \"\"\" Zero-pads model inputs and targets\n \"\"\"\n\n def __init__(self, return_ids=False):\n self.return_ids = return_ids\n\n def __call__(self, batch):\n \"\"\"Collate's training batch from normalized text, audio and speaker identities\n PARAMS\n ------\n batch: [text_normalized, spec_normalized, wav_normalized, sid]\n \"\"\"\n # Right zero-pad all one-hot text sequences to max input length\n _, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([x[1].size(1) for x in batch]),\n dim=0, descending=True)\n\n max_text_len = max([len(x[0]) for x in batch])\n max_spec_len = max([x[1].size(1) for x in batch])\n max_wav_len = max([x[2].size(1) for x in batch])\n\n text_lengths = torch.LongTensor(len(batch))\n spec_lengths = torch.LongTensor(len(batch))\n wav_lengths = torch.LongTensor(len(batch))\n sid = torch.LongTensor(len(batch))\n\n text_padded = torch.LongTensor(len(batch), max_text_len)\n tone_padded = torch.LongTensor(len(batch), max_text_len)\n language_padded = torch.LongTensor(len(batch), max_text_len)\n bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n\n spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)\n wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)\n text_padded.zero_()\n tone_padded.zero_()\n language_padded.zero_()\n spec_padded.zero_()\n wav_padded.zero_()\n bert_padded.zero_()\n for i in range(len(ids_sorted_decreasing)):\n row = 
batch[ids_sorted_decreasing[i]]\n\n text = row[0]\n text_padded[i, :text.size(0)] = text\n text_lengths[i] = text.size(0)\n\n spec = row[1]\n spec_padded[i, :, :spec.size(1)] = spec\n spec_lengths[i] = spec.size(1)\n\n wav = row[2]\n wav_padded[i, :, :wav.size(1)] = wav\n wav_lengths[i] = wav.size(1)\n\n sid[i] = row[3]\n\n tone = row[4]\n tone_padded[i, :tone.size(0)] = tone\n\n language = row[5]\n language_padded[i, :language.size(0)] = language\n\n bert = row[6]\n bert_padded[i, :, :bert.size(1)] = bert\n\n return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid, tone_padded, language_padded, bert_padded" }, { "identifier": "DistributedBucketSampler", "path": "data_utils.py", "snippet": "class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):\n \"\"\"\n Maintain similar input lengths in a batch.\n Length groups are specified by boundaries.\n Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.\n\n It removes samples which are not included in the boundaries.\n Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded.\n \"\"\"\n\n def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)\n self.lengths = dataset.lengths\n self.batch_size = batch_size\n self.boundaries = boundaries\n\n self.buckets, self.num_samples_per_bucket = self._create_buckets()\n self.total_size = sum(self.num_samples_per_bucket)\n self.num_samples = self.total_size // self.num_replicas\n\n def _create_buckets(self):\n buckets = [[] for _ in range(len(self.boundaries) - 1)]\n for i in range(len(self.lengths)):\n length = self.lengths[i]\n idx_bucket = self._bisect(length)\n if idx_bucket != -1:\n buckets[idx_bucket].append(i)\n\n for i in range(len(buckets) - 1, 0, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n\n num_samples_per_bucket = []\n for i in range(len(buckets)):\n len_bucket = len(buckets[i])\n total_batch_size = self.num_replicas * self.batch_size\n rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size\n num_samples_per_bucket.append(len_bucket + rem)\n return buckets, num_samples_per_bucket\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n\n indices = []\n if self.shuffle:\n for bucket in self.buckets:\n indices.append(torch.randperm(len(bucket), generator=g).tolist())\n else:\n for bucket in self.buckets:\n indices.append(list(range(len(bucket))))\n\n batches = []\n for i in range(len(self.buckets)):\n bucket = self.buckets[i]\n len_bucket = len(bucket)\n if (len_bucket == 0):\n continue\n ids_bucket = indices[i]\n num_samples_bucket = self.num_samples_per_bucket[i]\n\n # add extra samples to make it evenly divisible\n rem = num_samples_bucket - len_bucket\n ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)]\n\n # subsample\n ids_bucket = ids_bucket[self.rank::self.num_replicas]\n\n # batching\n for j in range(len(ids_bucket) // self.batch_size):\n batch = [bucket[idx] for idx in ids_bucket[j * self.batch_size:(j + 1) * self.batch_size]]\n batches.append(batch)\n\n if self.shuffle:\n batch_ids = torch.randperm(len(batches), generator=g).tolist()\n batches = [batches[i] for i in batch_ids]\n self.batches = batches\n\n assert len(self.batches) * 
self.batch_size == self.num_samples\n return iter(self.batches)\n\n def _bisect(self, x, lo=0, hi=None):\n if hi is None:\n hi = len(self.boundaries) - 1\n\n if hi > lo:\n mid = (hi + lo) // 2\n if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:\n return mid\n elif x <= self.boundaries[mid]:\n return self._bisect(x, lo, mid)\n else:\n return self._bisect(x, mid + 1, hi)\n else:\n return -1\n\n def __len__(self):\n return self.num_samples // self.batch_size" }, { "identifier": "SynthesizerTrn", "path": "models.py", "snippet": "class SynthesizerTrn(nn.Module):\n \"\"\"\n Synthesizer for Training\n \"\"\"\n\n def __init__(self,\n n_vocab,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n n_speakers=256,\n gin_channels=256,\n use_sdp=True,\n n_flow_layer = 4,\n n_layers_trans_flow = 3,\n flow_share_parameter = False,\n use_transformer_flow = True,\n **kwargs):\n\n super().__init__()\n self.n_vocab = n_vocab\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.n_speakers = n_speakers\n self.gin_channels = gin_channels\n self.n_layers_trans_flow = n_layers_trans_flow\n self.use_spk_conditioned_encoder = kwargs.get(\"use_spk_conditioned_encoder\", True)\n self.use_sdp = use_sdp\n self.use_noise_scaled_mas = kwargs.get(\"use_noise_scaled_mas\", False)\n self.mas_noise_scale_initial = kwargs.get(\"mas_noise_scale_initial\", 0.01)\n self.noise_scale_delta = kwargs.get(\"noise_scale_delta\", 2e-6)\n self.current_mas_noise_scale = self.mas_noise_scale_initial\n if self.use_spk_conditioned_encoder and gin_channels > 0:\n self.enc_gin_channels = gin_channels\n self.enc_p = TextEncoder(n_vocab,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n gin_channels=self.enc_gin_channels)\n self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates,\n upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)\n self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16,\n gin_channels=gin_channels)\n if use_transformer_flow:\n self.flow = TransformerCouplingBlock(inter_channels, hidden_channels, filter_channels, n_heads, n_layers_trans_flow, 5, p_dropout, n_flow_layer, gin_channels=gin_channels,share_parameter= flow_share_parameter)\n else:\n self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, n_flow_layer, gin_channels=gin_channels)\n self.sdp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)\n self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)\n \n if n_speakers >= 1:\n self.emb_g = nn.Embedding(n_speakers, gin_channels)\n else:\n self.ref_enc = ReferenceEncoder(spec_channels, gin_channels)\n\n 
def forward(self, x, x_lengths, y, y_lengths, sid, tone, language, bert):\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1,2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert,g=g)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n\n with torch.no_grad():\n # negative cross-entropy\n s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]\n neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s]\n neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2),\n s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s]\n neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4\n if self.use_noise_scaled_mas:\n epsilon = torch.std(neg_cent) * torch.randn_like(neg_cent) * self.current_mas_noise_scale\n neg_cent = neg_cent + epsilon\n\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()\n\n w = attn.sum(2)\n\n l_length_sdp = self.sdp(x, x_mask, w, g=g)\n l_length_sdp = l_length_sdp / torch.sum(x_mask)\n \n logw_ = torch.log(w + 1e-6) * x_mask\n logw = self.dp(x, x_mask, g=g)\n l_length_dp = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(x_mask) # for averaging\n\n l_length = l_length_dp + l_length_sdp\n\n # expand prior\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)\n\n z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)\n o = self.dec(z_slice, g=g)\n return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (x, logw, logw_)\n \n def infer(self, x, x_lengths, sid, tone, language, bert, noise_scale=.667, length_scale=1, noise_scale_w=0.8, max_len=None, sdp_ratio=0,y=None):\n #x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert)\n # g = self.gst(y)\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1,2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert,g=g)\n logw = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) * (sdp_ratio) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio)\n w = torch.exp(logw) * x_mask * length_scale\n w_ceil = torch.ceil(w)\n y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()\n y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = commons.generate_path(w_ceil, attn_mask)\n\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1,\n 2) # [b, t', t], [b, t, d] -> [b, d, t']\n\n z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale\n z = self.flow(z_p, y_mask, g=g, reverse=True)\n o = self.dec((z * y_mask)[:, :, :max_len], g=g)\n return o, attn, y_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "MultiPeriodDiscriminator", "path": "models.py", "snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self, 
use_spectral_norm=False):\n super(MultiPeriodDiscriminator, self).__init__()\n periods = [2, 3, 5, 7, 11]\n\n discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]\n discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]\n self.discriminators = nn.ModuleList(discs)\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n y_d_gs.append(y_d_g)\n fmap_rs.append(fmap_r)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs" }, { "identifier": "DurationDiscriminator", "path": "models.py", "snippet": "class DurationDiscriminator(nn.Module): #vits2\n def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):\n super().__init__()\n\n self.in_channels = in_channels\n self.filter_channels = filter_channels\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.gin_channels = gin_channels\n\n self.drop = nn.Dropout(p_dropout)\n self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2)\n self.norm_1 = modules.LayerNorm(filter_channels)\n self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2)\n self.norm_2 = modules.LayerNorm(filter_channels)\n self.dur_proj = nn.Conv1d(1, filter_channels, 1)\n\n self.pre_out_conv_1 = nn.Conv1d(2*filter_channels, filter_channels, kernel_size, padding=kernel_size//2)\n self.pre_out_norm_1 = modules.LayerNorm(filter_channels)\n self.pre_out_conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2)\n self.pre_out_norm_2 = modules.LayerNorm(filter_channels)\n\n if gin_channels != 0:\n self.cond = nn.Conv1d(gin_channels, in_channels, 1)\n\n self.output_layer = nn.Sequential(\n nn.Linear(filter_channels, 1), \n nn.Sigmoid() \n )\n\n def forward_probability(self, x, x_mask, dur, g=None):\n dur = self.dur_proj(dur)\n x = torch.cat([x, dur], dim=1)\n x = self.pre_out_conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_1(x)\n x = self.drop(x)\n x = self.pre_out_conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_2(x)\n x = self.drop(x)\n x = x * x_mask\n x = x.transpose(1, 2)\n output_prob = self.output_layer(x)\n return output_prob\n\n def forward(self, x, x_mask, dur_r, dur_hat, g=None):\n x = torch.detach(x)\n if g is not None:\n g = torch.detach(g)\n x = x + self.cond(g)\n x = self.conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.norm_1(x)\n x = self.drop(x)\n x = self.conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.norm_2(x)\n x = self.drop(x)\n\n output_probs = []\n for dur in [dur_r, dur_hat]:\n output_prob = self.forward_probability(x, x_mask, dur, g)\n output_probs.append(output_prob)\n\n return output_probs" }, { "identifier": "generator_loss", "path": "losses.py", "snippet": "def generator_loss(disc_outputs):\n loss = 0\n gen_losses = []\n for dg in disc_outputs:\n dg = dg.float()\n l = torch.mean((1-dg)**2)\n gen_losses.append(l)\n loss += l\n\n return loss, gen_losses" }, { "identifier": "discriminator_loss", "path": "losses.py", "snippet": "def discriminator_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n r_losses = []\n g_losses = []\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n dr = dr.float()\n dg = dg.float()\n r_loss = torch.mean((1-dr)**2)\n g_loss = torch.mean(dg**2)\n loss += (r_loss + g_loss)\n r_losses.append(r_loss.item())\n 
g_losses.append(g_loss.item())\n\n return loss, r_losses, g_losses" }, { "identifier": "feature_loss", "path": "losses.py", "snippet": "def feature_loss(fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n rl = rl.float().detach()\n gl = gl.float()\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 2 " }, { "identifier": "kl_loss", "path": "losses.py", "snippet": "def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):\n \"\"\"\n z_p, logs_q: [b, h, t_t]\n m_p, logs_p: [b, h, t_t]\n \"\"\"\n z_p = z_p.float()\n logs_q = logs_q.float()\n m_p = m_p.float()\n logs_p = logs_p.float()\n z_mask = z_mask.float()\n\n kl = logs_p - logs_q - 0.5\n kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. * logs_p)\n kl = torch.sum(kl * z_mask)\n l = kl / torch.sum(z_mask)\n return l" }, { "identifier": "mel_spectrogram_torch", "path": "mel_processing.py", "snippet": "def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):\n if torch.min(y) < -1.:\n print('min value is ', torch.min(y))\n if torch.max(y) > 1.:\n print('max value is ', torch.max(y))\n\n global mel_basis, hann_window\n dtype_device = str(y.dtype) + '_' + str(y.device)\n fmax_dtype_device = str(fmax) + '_' + dtype_device\n wnsize_dtype_device = str(win_size) + '_' + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)\n if wnsize_dtype_device not in hann_window:\n hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)\n\n y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')\n y = y.squeeze(1)\n\n spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],\n center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)\n\n spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)\n\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec" }, { "identifier": "spec_to_mel_torch", "path": "mel_processing.py", "snippet": "def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):\n global mel_basis\n dtype_device = str(spec.dtype) + '_' + str(spec.device)\n fmax_dtype_device = str(fmax) + '_' + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n return spec" }, { "identifier": "symbols", "path": "text/symbols.py", "snippet": "" } ]
import os import json import argparse import itertools import math import torch import shutil import torch.multiprocessing as mp import torch.distributed as dist import logging import commons import utils from torch import nn, optim from torch.nn import functional as F from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter from torch.nn.parallel import DistributedDataParallel as DDP from torch.cuda.amp import autocast, GradScaler from tqdm import tqdm from data_utils import ( TextAudioSpeakerLoader, TextAudioSpeakerCollate, DistributedBucketSampler ) from models import ( SynthesizerTrn, MultiPeriodDiscriminator, DurationDiscriminator, ) from losses import ( generator_loss, discriminator_loss, feature_loss, kl_loss ) from mel_processing import mel_spectrogram_torch, spec_to_mel_torch from text.symbols import symbols
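The import list above pulls in the single-node multi-GPU stack (torch.multiprocessing, torch.distributed, DistributedDataParallel) that run() wires together in the all_code field below. A minimal, hedged skeleton of that pattern follows; the port, model, and world size are stand-ins, and it assumes at least one CUDA GPU is available, as the training script itself requires.

```python
import os
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP

def run(rank, n_gpus):
    # Same backend choice as the script: gloo on Windows, nccl elsewhere.
    dist.init_process_group(backend="gloo" if os.name == "nt" else "nccl",
                            init_method="env://", world_size=n_gpus, rank=rank)
    torch.cuda.set_device(rank)
    model = torch.nn.Linear(16, 16).cuda(rank)   # stand-in for SynthesizerTrn
    model = DDP(model, device_ids=[rank], find_unused_parameters=True)
    # ... build dataset, DistributedBucketSampler, loaders, then train ...
    dist.destroy_process_group()

if __name__ == "__main__":
    os.environ["MASTER_ADDR"] = "localhost"
    os.environ["MASTER_PORT"] = "65280"          # illustrative port, as in the script
    n_gpus = torch.cuda.device_count()
    mp.spawn(run, nprocs=n_gpus, args=(n_gpus,))
```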
token_num: 10,901
_, optim_d, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d, skip_optimizer=not hps.cont) epoch_str = max(epoch_str, 1) global_step = (epoch_str - 1) * len(train_loader) except Exception as e: print(e) epoch_str = 1 global_step = 0 else: _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "G_*.pth"), net_g, optim_g, True) _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "D_*.pth"), net_d, optim_d, True) scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) if net_dur_disc is not None: scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR(optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str-2) else: scheduler_dur_disc = None scaler = GradScaler(enabled=hps.train.fp16_run) for epoch in range(epoch_str, hps.train.epochs + 1): if rank == 0: train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval],role=role) else: train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None, role=role) scheduler_g.step() scheduler_d.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, role): net_g, net_d, net_dur_disc = nets optim_g, optim_d, optim_dur_disc = optims scheduler_g, scheduler_d, scheduler_dur_disc = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in tqdm(enumerate(train_loader)): if net_g.module.use_noise_scaled_mas: current_mas_noise_scale = net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True) spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True) y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True) speakers = speakers.cuda(rank, non_blocking=True) tone = tone.cuda(rank, non_blocking=True) language = language.cuda(rank, non_blocking=True) bert = bert.cuda(rank, non_blocking=True) with autocast(enabled=hps.train.fp16_run): y_hat, l_length, attn, ids_slice, x_mask, z_mask, \ (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_) = net_g(x, x_lengths, spec, spec_lengths, speakers, tone, language, bert) mel = spec_to_mel_torch( spec, hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax) y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length) y_hat_mel = mel_spectrogram_torch( y_hat.squeeze(1), hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, hps.data.mel_fmin, hps.data.mel_fmax 
) y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice # Discriminator y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) with autocast(enabled=False): loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g) loss_disc_all = loss_disc if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach()) with autocast(enabled=False): # TODO: I think need to mean using the mask, but for now, just mean all loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g = discriminator_loss(y_dur_hat_r, y_dur_hat_g) loss_dur_disc_all = loss_dur_disc optim_dur_disc.zero_grad() scaler.scale(loss_dur_disc_all).backward() scaler.unscale_(optim_dur_disc) grad_norm_dur_disc = commons.clip_grad_value_(net_dur_disc.parameters(), None) scaler.step(optim_dur_disc) optim_d.zero_grad() scaler.scale(loss_disc_all).backward() scaler.unscale_(optim_d) grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) scaler.step(optim_d) with autocast(enabled=hps.train.fp16_run): # Generator y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_) with autocast(enabled=False): loss_dur = torch.sum(l_length.float()) loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
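The cropped_code above alternates a discriminator update and a generator update inside autocast, with a shared GradScaler handling loss scaling, gradient unscaling, and optimizer stepping. The sketch below reproduces that mixed-precision pattern with stand-in modules and placeholder losses; gradient clipping uses torch.nn.utils.clip_grad_norm_ in place of the repo's commons.clip_grad_value_, and the scaler is disabled so the snippet also runs on CPU.

```python
import torch
from torch.cuda.amp import autocast, GradScaler

G, D = torch.nn.Linear(16, 16), torch.nn.Linear(16, 1)   # stand-ins for net_g / net_d
opt_g = torch.optim.AdamW(G.parameters(), lr=2e-4)
opt_d = torch.optim.AdamW(D.parameters(), lr=2e-4)
use_fp16 = False            # hps.train.fp16_run; disabled here so this runs on CPU
scaler = GradScaler(enabled=use_fp16)

x = torch.randn(8, 16)

# --- discriminator update: the generator output is detached ---
with autocast(enabled=use_fp16):
    fake = G(x)
    loss_d = torch.mean(D(fake.detach()) ** 2)            # placeholder LSGAN term
opt_d.zero_grad()
scaler.scale(loss_d).backward()
scaler.unscale_(opt_d)                                    # so clipping sees unscaled grads
torch.nn.utils.clip_grad_norm_(D.parameters(), 100.0)
scaler.step(opt_d)

# --- generator update ---
with autocast(enabled=use_fp16):
    loss_g = torch.mean((1 - D(G(x))) ** 2)               # placeholder LSGAN term
opt_g.zero_grad()
scaler.scale(loss_g).backward()
scaler.unscale_(opt_g)
torch.nn.utils.clip_grad_norm_(G.parameters(), 100.0)
scaler.step(opt_g)
scaler.update()                                           # once per iteration
```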
logging.getLogger('numba').setLevel(logging.WARNING) torch.backends.cudnn.benchmark = True torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = True torch.set_float32_matmul_precision('medium') global_step = 0 def main(): """Assume Single Node Multi GPUs Training Only""" assert torch.cuda.is_available(), "CPU training is not allowed." n_gpus = torch.cuda.device_count() os.environ['MASTER_ADDR'] = 'localhost' os.environ['MASTER_PORT'] = '65280' hps = utils.get_hparams() role='' for t in hps.data.spk2id.items(): role=t[0] if not hps.cont: folder_path = f"./logs/{role}" if not os.path.exists(folder_path): os.makedirs(folder_path) print(f"文件夹 '{role}' 已创建在 './logs/' 目录下。") else: print(f"文件夹 '{role}' 已经存在于 './logs/' 目录下。") shutil.copy('./pretrained_models/D_0.pth',f'./logs/{role}/D_0.pth') shutil.copy('./pretrained_models/G_0.pth',f'./logs/{role}/G_0.pth') shutil.copy('./pretrained_models/DUR_0.pth',f'./logs/{role}/DUR_0.pth') mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps, role)) def run(rank, n_gpus, hps, role): global global_step if rank == 0: logger = utils.get_logger(hps.model_dir) logger.info(hps) utils.check_git_hash(hps.model_dir) writer = SummaryWriter(log_dir=hps.model_dir) writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) dist.init_process_group(backend= 'gloo' if os.name == 'nt' else 'nccl', init_method='env://', world_size=n_gpus, rank=rank) torch.manual_seed(hps.train.seed) torch.cuda.set_device(rank) train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, [32, 300, 400, 500, 600, 700, 800, 900, 1000], num_replicas=n_gpus, rank=rank, shuffle=True) collate_fn = TextAudioSpeakerCollate() train_loader = DataLoader(train_dataset, num_workers=2, shuffle=False, pin_memory=True, collate_fn=collate_fn, batch_sampler=train_sampler) if rank == 0: eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False, collate_fn=collate_fn) if "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas == True: print("Using noise scaled MAS for VITS2") use_noise_scaled_mas = True mas_noise_scale_initial = 0.01 noise_scale_delta = 2e-6 else: print("Using normal MAS for VITS1") use_noise_scaled_mas = False mas_noise_scale_initial = 0.0 noise_scale_delta = 0.0 if "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator == True: print("Using duration discriminator for VITS2") use_duration_discriminator = True net_dur_disc = DurationDiscriminator( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(rank) if "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder == True: if hps.data.n_speakers == 0: raise ValueError("n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model") use_spk_conditioned_encoder = True else: print("Using normal encoder for VITS1") use_spk_conditioned_encoder = False net_g = SynthesizerTrn( len(symbols), hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, mas_noise_scale_initial = mas_noise_scale_initial, noise_scale_delta = noise_scale_delta, **hps.model).cuda(rank) freeze_enc = getattr(hps.model, "freeze_enc", False) if freeze_enc: print("freeze 
encoder !!!") for param in net_g.enc_p.parameters(): param.requires_grad = False net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) optim_g = torch.optim.AdamW( filter(lambda p: p.requires_grad, net_g.parameters()), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps) optim_d = torch.optim.AdamW( net_d.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps) if net_dur_disc is not None: optim_dur_disc = torch.optim.AdamW( net_dur_disc.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps) else: optim_dur_disc = None net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True) net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True) if net_dur_disc is not None: net_dur_disc = DDP(net_dur_disc, device_ids=[rank], find_unused_parameters=True) pretrain_dir = None if pretrain_dir is None: try: if net_dur_disc is not None: _, optim_dur_disc, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, skip_optimizer=not hps.cont) _, optim_g, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g, skip_optimizer=not hps.cont) _, optim_d, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d, skip_optimizer=not hps.cont) epoch_str = max(epoch_str, 1) global_step = (epoch_str - 1) * len(train_loader) except Exception as e: print(e) epoch_str = 1 global_step = 0 else: _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "G_*.pth"), net_g, optim_g, True) _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "D_*.pth"), net_d, optim_d, True) scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) if net_dur_disc is not None: scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR(optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str-2) else: scheduler_dur_disc = None scaler = GradScaler(enabled=hps.train.fp16_run) for epoch in range(epoch_str, hps.train.epochs + 1): if rank == 0: train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval],role=role) else: train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None, role=role) scheduler_g.step() scheduler_d.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, role): net_g, net_d, net_dur_disc = nets optim_g, optim_d, optim_dur_disc = optims scheduler_g, scheduler_d, scheduler_dur_disc = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in tqdm(enumerate(train_loader)): if net_g.module.use_noise_scaled_mas: current_mas_noise_scale = 
net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True) spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True) y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True) speakers = speakers.cuda(rank, non_blocking=True) tone = tone.cuda(rank, non_blocking=True) language = language.cuda(rank, non_blocking=True) bert = bert.cuda(rank, non_blocking=True) with autocast(enabled=hps.train.fp16_run): y_hat, l_length, attn, ids_slice, x_mask, z_mask, \ (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_) = net_g(x, x_lengths, spec, spec_lengths, speakers, tone, language, bert) mel = spec_to_mel_torch( spec, hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax) y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length) y_hat_mel = mel_spectrogram_torch( y_hat.squeeze(1), hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, hps.data.mel_fmin, hps.data.mel_fmax ) y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice # Discriminator y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) with autocast(enabled=False): loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g) loss_disc_all = loss_disc if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach()) with autocast(enabled=False): # TODO: I think need to mean using the mask, but for now, just mean all loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g = discriminator_loss(y_dur_hat_r, y_dur_hat_g) loss_dur_disc_all = loss_dur_disc optim_dur_disc.zero_grad() scaler.scale(loss_dur_disc_all).backward() scaler.unscale_(optim_dur_disc) grad_norm_dur_disc = commons.clip_grad_value_(net_dur_disc.parameters(), None) scaler.step(optim_dur_disc) optim_d.zero_grad() scaler.scale(loss_disc_all).backward() scaler.unscale_(optim_d) grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) scaler.step(optim_d) with autocast(enabled=hps.train.fp16_run): # Generator y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_) with autocast(enabled=False): loss_dur = torch.sum(l_length.float()) loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
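One detail of run() worth isolating is how training resumes: epoch_str is recovered from the latest checkpoint and each ExponentialLR is created with last_epoch=epoch_str - 2 so the decay schedule picks up near where the previous run stopped. The sketch below shows that idiom in isolation; the initial_lr bookkeeping normally comes from the restored optimizer state, and the decay value is an assumed placeholder for hps.train.lr_decay.

```python
import torch

model = torch.nn.Linear(4, 4)
optim = torch.optim.AdamW(model.parameters(), lr=2e-4)

epoch_str = 5          # e.g. recovered by utils.load_checkpoint
lr_decay = 0.999875    # assumed placeholder for hps.train.lr_decay

# When last_epoch != -1, PyTorch expects 'initial_lr' in each param group;
# in the real script this is provided by the restored optimizer state.
for group in optim.param_groups:
    group.setdefault("initial_lr", group["lr"])

scheduler = torch.optim.lr_scheduler.ExponentialLR(
    optim, gamma=lr_decay, last_epoch=epoch_str - 2)

for epoch in range(epoch_str, epoch_str + 3):
    # ... train_and_evaluate(...) for this epoch ...
    scheduler.step()
    print(epoch, scheduler.get_last_lr())
```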
next_line: loss_fm = feature_loss(fmap_r, fmap_g)
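The target next_line above adds the feature-matching term, which losses.feature_loss computes as an L1 distance between real and generated discriminator feature maps, summed over every layer of every sub-discriminator and scaled by 2. A standalone restatement on random tensors:

```python
import torch

def feature_matching_loss(fmap_real, fmap_fake):
    # L1 distance between real (detached) and generated feature maps,
    # accumulated over layers and sub-discriminators, scaled by 2
    # exactly as in losses.feature_loss.
    loss = 0.0
    for layers_real, layers_fake in zip(fmap_real, fmap_fake):
        for rl, gl in zip(layers_real, layers_fake):
            loss = loss + torch.mean(torch.abs(rl.float().detach() - gl.float()))
    return loss * 2

# One sub-discriminator with three intermediate feature maps, batch of 2.
fmap_r = [[torch.randn(2, 32, 50) for _ in range(3)]]
fmap_g = [[torch.randn(2, 32, 50) for _ in range(3)]]
print(feature_matching_loss(fmap_r, fmap_g))
```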
gold_snippet_index: 8
created_at: 2023-10-10 02:23:23+00:00
level: 16k
repo_name: sakemin/cog-musicgen-chord
file_path: audiocraft/modules/conditioners.py
[ { "identifier": "ChromaExtractor", "path": "audiocraft/modules/chroma.py", "snippet": "class ChromaExtractor(nn.Module):\n \"\"\"Chroma extraction and quantization.\n\n Args:\n sample_rate (int): Sample rate for the chroma extraction.\n n_chroma (int): Number of chroma bins for the chroma extraction.\n radix2_exp (int): Size of stft window for the chroma extraction (power of 2, e.g. 12 -> 2^12).\n nfft (int, optional): Number of FFT.\n winlen (int, optional): Window length.\n winhop (int, optional): Window hop size.\n argmax (bool, optional): Whether to use argmax. Defaults to False.\n norm (float, optional): Norm for chroma normalization. Defaults to inf.\n \"\"\"\n def __init__(self, sample_rate: int, n_chroma: int = 12, radix2_exp: int = 12, nfft: tp.Optional[int] = None,\n winlen: tp.Optional[int] = None, winhop: tp.Optional[int] = None, argmax: bool = False,\n norm: float = torch.inf):\n super().__init__()\n self.winlen = winlen or 2 ** radix2_exp\n self.nfft = nfft or self.winlen\n self.winhop = winhop or (self.winlen // 4)\n self.sample_rate = sample_rate\n self.n_chroma = n_chroma\n self.norm = norm\n self.argmax = argmax\n self.register_buffer('fbanks', torch.from_numpy(filters.chroma(sr=sample_rate, n_fft=self.nfft, tuning=0,\n n_chroma=self.n_chroma)), persistent=False)\n self.spec = torchaudio.transforms.Spectrogram(n_fft=self.nfft, win_length=self.winlen,\n hop_length=self.winhop, power=2, center=True,\n pad=0, normalized=True)\n\n def forward(self, wav: torch.Tensor) -> torch.Tensor:\n T = wav.shape[-1]\n # in case we are getting a wav that was dropped out (nullified)\n # from the conditioner, make sure wav length is no less that nfft\n if T < self.nfft:\n pad = self.nfft - T\n r = 0 if pad % 2 == 0 else 1\n wav = F.pad(wav, (pad // 2, pad // 2 + r), 'constant', 0)\n assert wav.shape[-1] == self.nfft, f\"expected len {self.nfft} but got {wav.shape[-1]}\"\n\n spec = self.spec(wav).squeeze(1)\n raw_chroma = torch.einsum('cf,...ft->...ct', self.fbanks, spec)\n norm_chroma = torch.nn.functional.normalize(raw_chroma, p=self.norm, dim=-2, eps=1e-6)\n norm_chroma = rearrange(norm_chroma, 'b d t -> b t d')\n\n if self.argmax:\n idx = norm_chroma.argmax(-1, keepdim=True)\n norm_chroma[:] = 0\n norm_chroma.scatter_(dim=-1, index=idx, value=1)\n\n return norm_chroma" }, { "identifier": "ChordExtractor", "path": "audiocraft/modules/chord_chroma.py", "snippet": "class ChordExtractor(nn.Module):\n\n def __init__(self, device, sample_rate, max_duration, chroma_len, n_chroma, winhop):\n super().__init__()\n self.config = HParams.load(\"/src/audiocraft/modules/btc/run_config.yaml\") #gotta specify the path for run_config.yaml of btc\n\n # self.config.feature['large_voca'] = False\n # self.config.model['num_chords'] = 25\n\n self.model_file = '/src/audiocraft/modules/btc/test/btc_model_large_voca.pt'\n # self.model_file = 'audiocraft/modules/btc/test/btc_model.pt'\n self.idx_to_chord = idx2voca_chord()\n self.sr = sample_rate\n\n self.n_chroma = n_chroma\n self.max_duration = max_duration\n self.chroma_len = chroma_len\n self.to_timebin = self.max_duration/self.chroma_len\n self.timebin = winhop\n\n self.chords = chords.Chords()\n self.device = device\n\n self.denoise_window_size = 7\n self.denoise_threshold = 0.5\n \n self.model = BTC_model(config=self.config.model).to(device)\n if os.path.isfile(self.model_file):\n checkpoint = torch.load(self.model_file)\n self.mean = checkpoint['mean']\n self.std = checkpoint['std']\n self.model.load_state_dict(checkpoint['model'])\n\n def forward(self, 
wavs:torch.Tensor) -> torch.Tensor:\n sr = self.config.mp3['song_hz']\n chromas = []\n for wav in wavs:\n original_wav = librosa.resample(wav.cpu().numpy(), orig_sr=self.sr, target_sr=sr)\n original_wav = original_wav.squeeze(0)\n # print(original_wav.shape)\n T = original_wav.shape[-1]\n # in case we are getting a wav that was dropped out (nullified)\n # from the conditioner, make sure wav length is no less that nfft\n if T < self.timebin//4:\n pad = self.timebin//4 - T\n r = 0 if pad % 2 == 0 else 1\n original_wav = F.pad(torch.Tensor(original_wav), (pad // 2, pad // 2 + r), 'constant', 0)\n original_wav = original_wav.numpy()\n assert original_wav.shape[-1] == self.timebin//4, f\"expected len {self.timebin//4} but got {original_wav.shape[-1]}\"\n # print(original_wav.shape)\n #preprocess\n currunt_sec_hz = 0\n\n while len(original_wav) > currunt_sec_hz + self.config.mp3['song_hz'] * self.config.mp3['inst_len']:\n start_idx = int(currunt_sec_hz)\n end_idx = int(currunt_sec_hz + self.config.mp3['song_hz'] * self.config.mp3['inst_len'])\n tmp = librosa.cqt(original_wav[start_idx:end_idx], sr=sr, n_bins=self.config.feature['n_bins'], bins_per_octave=self.config.feature['bins_per_octave'], hop_length=self.config.feature['hop_length'])\n if start_idx == 0:\n feature = tmp\n else:\n feature = np.concatenate((feature, tmp), axis=1)\n currunt_sec_hz = end_idx\n \n if currunt_sec_hz == 0:\n feature = librosa.cqt(original_wav[currunt_sec_hz:], sr=sr, n_bins=self.config.feature['n_bins'], bins_per_octave=self.config.feature['bins_per_octave'], hop_length=self.config.feature['hop_length'])\n else:\n tmp = librosa.cqt(original_wav[currunt_sec_hz:], sr=sr, n_bins=self.config.feature['n_bins'], bins_per_octave=self.config.feature['bins_per_octave'], hop_length=self.config.feature['hop_length'])\n feature = np.concatenate((feature, tmp), axis=1)\n # print(feature.shape)\n feature = np.log(np.abs(feature) + 1e-6)\n # print(feature)\n feature_per_second = self.config.mp3['inst_len'] / self.config.model['timestep']\n song_length_second = len(original_wav)/self.config.mp3['song_hz']\n\n feature = feature.T\n feature = (feature - self.mean)/self.std\n\n time_unit = feature_per_second\n n_timestep = self.config.model['timestep']\n\n num_pad = n_timestep - (feature.shape[0] % n_timestep)\n feature = np.pad(feature, ((0, num_pad), (0, 0)), mode=\"constant\", constant_values=0)\n num_instance = feature.shape[0] // n_timestep\n\n #inference\n start_time = 0.0\n lines = []\n with torch.no_grad():\n self.model.eval()\n feature = torch.tensor(feature, dtype=torch.float32).unsqueeze(0).to(self.device)\n for t in range(num_instance):\n self_attn_output, _ = self.model.self_attn_layers(feature[:, n_timestep * t:n_timestep * (t + 1), :])\n prediction, _ = self.model.output_layer(self_attn_output)\n prediction = prediction.squeeze()\n for i in range(n_timestep):\n if t == 0 and i == 0:\n prev_chord = prediction[i].item()\n continue\n if prediction[i].item() != prev_chord:\n lines.append(\n '%.3f %.3f %s\\n' % (start_time, time_unit * (n_timestep * t + i), self.idx_to_chord[prev_chord]))\n start_time = time_unit * (n_timestep * t + i)\n prev_chord = prediction[i].item()\n if t == num_instance - 1 and i + num_pad == n_timestep:\n if start_time != time_unit * (n_timestep * t + i):\n lines.append('%.3f %.3f %s\\n' % (start_time, time_unit * (n_timestep * t + i), self.idx_to_chord[prev_chord]))\n break\n\n strlines = ''.join(lines)\n\n chroma = []\n\n count = 0\n for line in lines:\n if count >= self.chroma_len: \n break\n 
splits = line.split()\n if len(splits) == 3:\n s = splits[0]\n e = splits[1]\n l = splits[2]\n\n crd = self.chords.chord(l)\n \n if crd[0] == -1:\n multihot = torch.Tensor(crd[2])\n else:\n multihot = torch.concat([torch.Tensor(crd[2])[-crd[0]:],torch.Tensor(crd[2])[:-crd[0]]])\n start_bin = round(float(s)/self.to_timebin)\n end_bin = round(float(e)/self.to_timebin)\n for j in range(start_bin,end_bin):\n if count >= self.chroma_len: \n break\n chroma.append(multihot)\n count += 1\n \n chroma = torch.stack(chroma, dim=0)\n\n # Denoising chroma\n kernel = torch.ones(self.denoise_window_size)/self.denoise_window_size\n\n filtered_signals = []\n for i in range(chroma.shape[-1]):\n filtered_signals.append(torch.nn.functional.conv1d(chroma[...,i].unsqueeze(0),\n kernel.unsqueeze(0).unsqueeze(0).to(chroma.device), \n padding=(self.denoise_window_size - 1) // 2))\n filtered_signals = torch.stack(filtered_signals, dim=-1)\n filtered_signals = filtered_signals > self.denoise_threshold\n\n chromas.append(filtered_signals.squeeze(0))\n \n return torch.stack(chromas, dim=0).to(self.device)" }, { "identifier": "StreamingModule", "path": "audiocraft/modules/streaming.py", "snippet": "class StreamingModule(nn.Module):\n \"\"\"Common API for streaming components.\n\n Each streaming component has a streaming state, which is just a dict[str, Tensor].\n By convention, the first dim of each tensor must be the batch size.\n Don't use dots in the key names, as this would clash with submodules\n (like in state_dict).\n\n If `self._is_streaming` is True, the component should use and remember\n the proper state inside `self._streaming_state`.\n\n To set a streaming component in streaming state, use\n\n with module.streaming():\n ...\n\n This will automatically reset the streaming state when exiting the context manager.\n This also automatically propagates to all streaming children module.\n\n Some module might also implement the `StreamingModule.flush` method, although\n this one is trickier, as all parents module must be StreamingModule and implement\n it as well for it to work properly. See `StreamingSequential` after.\n \"\"\"\n def __init__(self) -> None:\n super().__init__()\n self._streaming_state: State = {}\n self._is_streaming = False\n\n def _apply_named_streaming(self, fn: tp.Any):\n for name, module in self.named_modules():\n if isinstance(module, StreamingModule):\n fn(name, module)\n\n def _set_streaming(self, streaming: bool):\n def _set_streaming(name, module):\n module._is_streaming = streaming\n self._apply_named_streaming(_set_streaming)\n\n @contextmanager\n def streaming(self):\n \"\"\"Context manager to enter streaming mode. 
Reset streaming state on exit.\"\"\"\n self._set_streaming(True)\n try:\n yield\n finally:\n self._set_streaming(False)\n self.reset_streaming()\n\n def reset_streaming(self):\n \"\"\"Reset the streaming state.\"\"\"\n def _reset(name: str, module: StreamingModule):\n module._streaming_state.clear()\n\n self._apply_named_streaming(_reset)\n\n def get_streaming_state(self) -> State:\n \"\"\"Return the streaming state, including that of sub-modules.\"\"\"\n state: State = {}\n\n def _add(name: str, module: StreamingModule):\n if name:\n name += \".\"\n for key, value in module._streaming_state.items():\n state[name + key] = value\n\n self._apply_named_streaming(_add)\n return state\n\n def set_streaming_state(self, state: State):\n \"\"\"Set the streaming state, including that of sub-modules.\"\"\"\n state = dict(state)\n\n def _set(name: str, module: StreamingModule):\n if name:\n name += \".\"\n module._streaming_state.clear()\n for key, value in list(state.items()):\n # complexity is not ideal here, but probably fine.\n if key.startswith(name):\n local_key = key[len(name):]\n if '.' not in local_key:\n module._streaming_state[local_key] = value\n del state[key]\n\n self._apply_named_streaming(_set)\n assert len(state) == 0, list(state.keys())\n\n def flush(self, x: tp.Optional[torch.Tensor] = None):\n \"\"\"Flush any remaining outputs that were waiting for completion.\n Typically, for convolutions, this will add the final padding\n and process the last buffer.\n\n This should take an optional argument `x`, which will be provided\n if a module before this one in the streaming pipeline has already\n spitted out a flushed out buffer.\n \"\"\"\n if x is None:\n return None\n else:\n return self(x)" }, { "identifier": "create_sin_embedding", "path": "audiocraft/modules/transformer.py", "snippet": "def create_sin_embedding(positions: torch.Tensor, dim: int, max_period: float = 10000,\n dtype: torch.dtype = torch.float32) -> torch.Tensor:\n \"\"\"Create sinusoidal positional embedding, with shape `[B, T, C]`.\n\n Args:\n positions (torch.Tensor): LongTensor of positions.\n dim (int): Dimension of the embedding.\n max_period (float): Maximum period of the cosine/sine functions.\n dtype (torch.dtype or str): dtype to use to generate the embedding.\n Returns:\n torch.Tensor: Sinusoidal positional embedding.\n \"\"\"\n # We aim for BTC format\n assert dim % 2 == 0\n half_dim = dim // 2\n positions = positions.to(dtype)\n adim = torch.arange(half_dim, device=positions.device, dtype=dtype).view(1, 1, -1)\n max_period_tensor = torch.full([], max_period, device=positions.device, dtype=dtype) # avoid sync point\n phase = positions / (max_period_tensor ** (adim / (half_dim - 1)))\n return torch.cat([torch.cos(phase), torch.sin(phase)], dim=-1)" }, { "identifier": "audio_read", "path": "audiocraft/data/audio.py", "snippet": "def audio_read(filepath: tp.Union[str, Path], seek_time: float = 0.,\n duration: float = -1., pad: bool = False) -> tp.Tuple[torch.Tensor, int]:\n \"\"\"Read audio by picking the most appropriate backend tool based on the audio format.\n\n Args:\n filepath (str or Path): Path to audio file to read.\n seek_time (float): Time at which to start reading in the file.\n duration (float): Duration to read from the file. 
If set to -1, the whole file is read.\n pad (bool): Pad output audio if not reaching expected duration.\n Returns:\n tuple of torch.Tensor, int: Tuple containing audio data and sample rate.\n \"\"\"\n fp = Path(filepath)\n if fp.suffix in ['.flac', '.ogg']: # TODO: check if we can safely use av_read for .ogg\n # There is some bug with ffmpeg and reading flac\n info = _soundfile_info(filepath)\n frames = -1 if duration <= 0 else int(duration * info.sample_rate)\n frame_offset = int(seek_time * info.sample_rate)\n wav, sr = soundfile.read(filepath, start=frame_offset, frames=frames, dtype=np.float32)\n assert info.sample_rate == sr, f\"Mismatch of sample rates {info.sample_rate} {sr}\"\n wav = torch.from_numpy(wav).t().contiguous()\n if len(wav.shape) == 1:\n wav = torch.unsqueeze(wav, 0)\n else:\n wav, sr = _av_read(filepath, seek_time, duration)\n if pad and duration > 0:\n expected_frames = int(duration * sr)\n wav = F.pad(wav, (0, expected_frames - wav.shape[-1]))\n return wav, sr" }, { "identifier": "SegmentInfo", "path": "audiocraft/data/audio_dataset.py", "snippet": "class SegmentInfo(BaseInfo):\n meta: AudioMeta\n seek_time: float\n # The following values are given once the audio is processed, e.g.\n # at the target sample rate and target number of channels.\n n_frames: int # actual number of frames without padding\n total_frames: int # total number of frames, padding included\n sample_rate: int # actual sample rate\n channels: int # number of audio channels." }, { "identifier": "convert_audio", "path": "audiocraft/data/audio_utils.py", "snippet": "def convert_audio(wav: torch.Tensor, from_rate: float,\n to_rate: float, to_channels: int) -> torch.Tensor:\n \"\"\"Convert audio to new sample rate and number of audio channels.\"\"\"\n wav = julius.resample_frac(wav, int(from_rate), int(to_rate))\n wav = convert_audio_channels(wav, to_channels)\n return wav" }, { "identifier": "AudioCraftEnvironment", "path": "audiocraft/environment.py", "snippet": "class AudioCraftEnvironment:\n \"\"\"Environment configuration for teams and clusters.\n\n AudioCraftEnvironment picks compute cluster settings (slurm, dora) from the current running environment\n or declared variable and the loaded team configuration. Additionally, the AudioCraftEnvironment\n provides pointers to a reference folder resolved automatically across clusters that is shared across team members,\n allowing to share sigs or other files to run jobs. Finally, it provides dataset mappers to automatically\n map dataset file paths to new locations across clusters, allowing to use the same manifest of files across cluters.\n\n The cluster type is identified automatically and base configuration file is read from config/teams.yaml.\n Use the following environment variables to specify the cluster, team or configuration:\n\n AUDIOCRAFT_CLUSTER (optional): Cluster type to enforce. Useful if the cluster type\n cannot be inferred automatically.\n AUDIOCRAFT_CONFIG (optional): Path to yaml config holding the teams configuration.\n If not set, configuration is read from config/teams.yaml.\n AUDIOCRAFT_TEAM (optional): Name of the team. 
Recommended to set to your own team.\n Cluster configuration are shared across teams to match compute allocation,\n specify your cluster configuration in the configuration file under a key mapping\n your team name.\n \"\"\"\n _instance = None\n DEFAULT_TEAM = \"default\"\n\n def __init__(self) -> None:\n \"\"\"Loads configuration.\"\"\"\n self.team: str = os.getenv(\"AUDIOCRAFT_TEAM\", self.DEFAULT_TEAM)\n cluster_type = _guess_cluster_type()\n cluster = os.getenv(\n \"AUDIOCRAFT_CLUSTER\", cluster_type.value\n )\n logger.info(\"Detecting cluster type %s\", cluster_type)\n\n self.cluster: str = cluster\n\n config_path = os.getenv(\n \"AUDIOCRAFT_CONFIG\",\n Path(__file__)\n .parent.parent.joinpath(\"config/teams\", self.team)\n .with_suffix(\".yaml\"),\n )\n self.config = omegaconf.OmegaConf.load(config_path)\n self._dataset_mappers = []\n cluster_config = self._get_cluster_config()\n if \"dataset_mappers\" in cluster_config:\n for pattern, repl in cluster_config[\"dataset_mappers\"].items():\n regex = re.compile(pattern)\n self._dataset_mappers.append((regex, repl))\n\n def _get_cluster_config(self) -> omegaconf.DictConfig:\n assert isinstance(self.config, omegaconf.DictConfig)\n return self.config[self.cluster]\n\n @classmethod\n def instance(cls):\n if cls._instance is None:\n cls._instance = cls()\n return cls._instance\n\n @classmethod\n def reset(cls):\n \"\"\"Clears the environment and forces a reload on next invocation.\"\"\"\n cls._instance = None\n\n @classmethod\n def get_team(cls) -> str:\n \"\"\"Gets the selected team as dictated by the AUDIOCRAFT_TEAM env var.\n If not defined, defaults to \"labs\".\n \"\"\"\n return cls.instance().team\n\n @classmethod\n def get_cluster(cls) -> str:\n \"\"\"Gets the detected cluster.\n This value can be overridden by the AUDIOCRAFT_CLUSTER env var.\n \"\"\"\n return cls.instance().cluster\n\n @classmethod\n def get_dora_dir(cls) -> Path:\n \"\"\"Gets the path to the dora directory for the current team and cluster.\n Value is overridden by the AUDIOCRAFT_DORA_DIR env var.\n \"\"\"\n cluster_config = cls.instance()._get_cluster_config()\n dora_dir = os.getenv(\"AUDIOCRAFT_DORA_DIR\", cluster_config[\"dora_dir\"])\n logger.warning(f\"Dora directory: {dora_dir}\")\n return Path(dora_dir)\n\n @classmethod\n def get_reference_dir(cls) -> Path:\n \"\"\"Gets the path to the reference directory for the current team and cluster.\n Value is overridden by the AUDIOCRAFT_REFERENCE_DIR env var.\n \"\"\"\n cluster_config = cls.instance()._get_cluster_config()\n return Path(os.getenv(\"AUDIOCRAFT_REFERENCE_DIR\", cluster_config[\"reference_dir\"]))\n\n @classmethod\n def get_slurm_exclude(cls) -> tp.Optional[str]:\n \"\"\"Get the list of nodes to exclude for that cluster.\"\"\"\n cluster_config = cls.instance()._get_cluster_config()\n return cluster_config.get(\"slurm_exclude\")\n\n @classmethod\n def get_slurm_partitions(cls, partition_types: tp.Optional[tp.List[str]] = None) -> str:\n \"\"\"Gets the requested partitions for the current team and cluster as a comma-separated string.\n\n Args:\n partition_types (list[str], optional): partition types to retrieve. Values must be\n from ['global', 'team']. 
If not provided, the global partition is returned.\n \"\"\"\n if not partition_types:\n partition_types = [\"global\"]\n\n cluster_config = cls.instance()._get_cluster_config()\n partitions = [\n cluster_config[\"partitions\"][partition_type]\n for partition_type in partition_types\n ]\n return \",\".join(partitions)\n\n @classmethod\n def resolve_reference_path(cls, path: tp.Union[str, Path]) -> Path:\n \"\"\"Converts reference placeholder in path with configured reference dir to resolve paths.\n\n Args:\n path (str or Path): Path to resolve.\n Returns:\n Path: Resolved path.\n \"\"\"\n path = str(path)\n\n if path.startswith(\"//reference\"):\n reference_dir = cls.get_reference_dir()\n logger.warn(f\"Reference directory: {reference_dir}\")\n assert (\n reference_dir.exists() and reference_dir.is_dir()\n ), f\"Reference directory does not exist: {reference_dir}.\"\n path = re.sub(\"^//reference\", str(reference_dir), path)\n\n return Path(path)\n\n @classmethod\n def apply_dataset_mappers(cls, path: str) -> str:\n \"\"\"Applies dataset mapping regex rules as defined in the configuration.\n If no rules are defined, the path is returned as-is.\n \"\"\"\n instance = cls.instance()\n\n for pattern, repl in instance._dataset_mappers:\n path = pattern.sub(repl, path)\n\n return path" }, { "identifier": "ResidualVectorQuantizer", "path": "audiocraft/quantization/vq.py", "snippet": "class ResidualVectorQuantizer(BaseQuantizer):\n \"\"\"Residual Vector Quantizer.\n\n Args:\n dimension (int): Dimension of the codebooks.\n n_q (int): Number of residual vector quantizers used.\n q_dropout (bool): Random quantizer drop out at train time.\n bins (int): Codebook size.\n decay (float): Decay for exponential moving average over the codebooks.\n kmeans_init (bool): Whether to use kmeans to initialize the codebooks.\n kmeans_iters (int): Number of iterations used for kmeans initialization.\n threshold_ema_dead_code (int): Threshold for dead code expiration. 
Replace any codes\n that have an exponential moving average cluster size less than the specified threshold with\n randomly selected vector from the current batch.\n orthogonal_reg_weight (float): Orthogonal regularization weights.\n orthogonal_reg_active_codes_only (bool): Apply orthogonal regularization only on active codes.\n orthogonal_reg_max_codes (optional int): Maximum number of codes to consider.\n for orthogonal regularization.\n \"\"\"\n def __init__(\n self,\n dimension: int = 256,\n n_q: int = 8,\n q_dropout: bool = False,\n bins: int = 1024,\n decay: float = 0.99,\n kmeans_init: bool = True,\n kmeans_iters: int = 10,\n threshold_ema_dead_code: int = 2,\n orthogonal_reg_weight: float = 0.0,\n orthogonal_reg_active_codes_only: bool = False,\n orthogonal_reg_max_codes: tp.Optional[int] = None,\n ):\n super().__init__()\n self.max_n_q = n_q\n self.n_q = n_q\n self.q_dropout = q_dropout\n self.dimension = dimension\n self.bins = bins\n self.decay = decay\n self.kmeans_init = kmeans_init\n self.kmeans_iters = kmeans_iters\n self.threshold_ema_dead_code = threshold_ema_dead_code\n self.orthogonal_reg_weight = orthogonal_reg_weight\n self.orthogonal_reg_active_codes_only = orthogonal_reg_active_codes_only\n self.orthogonal_reg_max_codes = orthogonal_reg_max_codes\n self.vq = ResidualVectorQuantization(\n dim=self.dimension,\n codebook_size=self.bins,\n num_quantizers=self.n_q,\n decay=self.decay,\n kmeans_init=self.kmeans_init,\n kmeans_iters=self.kmeans_iters,\n threshold_ema_dead_code=self.threshold_ema_dead_code,\n orthogonal_reg_weight=self.orthogonal_reg_weight,\n orthogonal_reg_active_codes_only=self.orthogonal_reg_active_codes_only,\n orthogonal_reg_max_codes=self.orthogonal_reg_max_codes,\n channels_last=False\n )\n\n def forward(self, x: torch.Tensor, frame_rate: int):\n n_q = self.n_q\n if self.training and self.q_dropout:\n n_q = int(torch.randint(1, self.n_q + 1, (1,)).item())\n bw_per_q = math.log2(self.bins) * frame_rate / 1000\n quantized, codes, commit_loss = self.vq(x, n_q=n_q)\n codes = codes.transpose(0, 1)\n # codes is [B, K, T], with T frames, K nb of codebooks.\n bw = torch.tensor(n_q * bw_per_q).to(x)\n return QuantizedResult(quantized, codes, bw, penalty=torch.mean(commit_loss))\n\n def encode(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Encode a given input tensor with the specified frame rate at the given bandwidth.\n The RVQ encode method sets the appropriate number of quantizer to use\n and returns indices for each quantizer.\n \"\"\"\n n_q = self.n_q\n codes = self.vq.encode(x, n_q=n_q)\n codes = codes.transpose(0, 1)\n # codes is [B, K, T], with T frames, K nb of codebooks.\n return codes\n\n def decode(self, codes: torch.Tensor) -> torch.Tensor:\n \"\"\"Decode the given codes to the quantized representation.\"\"\"\n # codes is [B, K, T], with T frames, K nb of codebooks, vq.decode expects [K, B, T].\n codes = codes.transpose(0, 1)\n quantized = self.vq.decode(codes)\n return quantized\n\n @property\n def total_codebooks(self):\n return self.max_n_q\n\n @property\n def num_codebooks(self):\n return self.n_q\n\n def set_num_codebooks(self, n: int):\n assert n > 0 and n <= self.max_n_q\n self.n_q = n" }, { "identifier": "TorchAutocast", "path": "audiocraft/utils/autocast.py", "snippet": "class TorchAutocast:\n \"\"\"TorchAutocast utility class.\n Allows you to enable and disable autocast. 
This is specially useful\n when dealing with different architectures and clusters with different\n levels of support.\n\n Args:\n enabled (bool): Whether to enable torch.autocast or not.\n args: Additional args for torch.autocast.\n kwargs: Additional kwargs for torch.autocast\n \"\"\"\n def __init__(self, enabled: bool, *args, **kwargs):\n self.autocast = torch.autocast(*args, **kwargs) if enabled else None\n\n def __enter__(self):\n if self.autocast is None:\n return\n try:\n self.autocast.__enter__()\n except RuntimeError:\n device = self.autocast.device\n dtype = self.autocast.fast_dtype\n raise RuntimeError(\n f\"There was an error autocasting with dtype={dtype} device={device}\\n\"\n \"If you are on the FAIR Cluster, you might need to use autocast_dtype=float16\"\n )\n\n def __exit__(self, *args, **kwargs):\n if self.autocast is None:\n return\n self.autocast.__exit__(*args, **kwargs)" }, { "identifier": "EmbeddingCache", "path": "audiocraft/utils/cache.py", "snippet": "class EmbeddingCache:\n \"\"\"Cache around embeddings computation for faster execution.\n The EmbeddingCache is storing pre-computed embeddings on disk and provides a simple API\n to retrieve the pre-computed embeddings on full inputs and extract only a given chunk\n using a user-provided function. When the cache is warm (all embeddings are pre-computed),\n the EmbeddingCache allows for faster training as it removes the need of computing the embeddings.\n Additionally, it provides in-memory cache around the loaded embeddings to limit IO footprint\n and synchronization points in the forward calls.\n\n Args:\n cache_path (Path): Path to folder where all pre-computed embeddings are saved on disk.\n device (str or torch.device): Device on which the embedding is returned.\n compute_embed_fn (callable[[Path, any, int], torch.Tensor], optional): Function to compute\n the embedding from a given object and path. This user provided function can compute the\n embedding from the provided object or using the provided path as entry point. The last parameter\n specify the index corresponding to the current embedding in the object that can represent batch metadata.\n extract_embed_fn (callable[[torch.Tensor, any, int], torch.Tensor], optional): Function to extract\n the desired embedding chunk from the full embedding loaded from the cache. 
The last parameter\n specify the index corresponding to the current embedding in the object that can represent batch metadata.\n If not specified, will return the full embedding unmodified.\n \"\"\"\n def __init__(self, cache_path: tp.Union[str, Path], device: tp.Union[str, torch.device],\n compute_embed_fn: tp.Callable[[Path, tp.Any, int], torch.Tensor],\n extract_embed_fn: tp.Optional[tp.Callable[[torch.Tensor, tp.Any, int], torch.Tensor]] = None):\n self.cache_path = Path(cache_path)\n self.device = device\n self._compute_embed_fn = compute_embed_fn\n self._extract_embed_fn: tp.Callable[[torch.Tensor, tp.Any, int], torch.Tensor]\n if extract_embed_fn is not None:\n self._extract_embed_fn = extract_embed_fn\n else:\n self._extract_embed_fn = partial(get_full_embed, device=device)\n if self.cache_path is not None:\n self.cache_path.mkdir(exist_ok=True, parents=True)\n logger.info(f\"Cache instantiated at: {self.cache_path}\")\n self.pool = ThreadPoolExecutor(8)\n self.pool.__enter__()\n self._current_batch_cache: dict = {}\n self._memory_cache: dict = {}\n\n def _get_cache_path(self, path: tp.Union[Path, str]):\n \"\"\"Get cache path for the given file path.\"\"\"\n sig = sha1(str(path).encode()).hexdigest()\n return self.cache_path / sig\n\n @staticmethod\n def _get_full_embed_from_cache(cache: Path):\n \"\"\"Loads full pre-computed embedding from the cache.\"\"\"\n try:\n embed = torch.load(cache, 'cpu')\n except Exception as exc:\n logger.error(\"Error loading %s: %r\", cache, exc)\n embed = None\n return embed\n\n def get_embed_from_cache(self, paths: tp.List[Path], x: tp.Any) -> torch.Tensor:\n \"\"\"Get embedding from cache, computing and storing it to cache if not already cached.\n The EmbeddingCache first tries to load the embedding from the in-memory cache\n containing the pre-computed chunks populated through `populate_embed_cache`.\n If not found, the full embedding is computed and stored on disk to be later accessed\n to populate the in-memory cache, and the desired embedding chunk is extracted and returned.\n\n Args:\n paths (list[Path or str]): List of paths from where the embeddings can be loaded.\n x (any): Object from which the embedding is extracted.\n \"\"\"\n embeds = []\n for idx, path in enumerate(paths):\n cache = self._get_cache_path(path)\n if cache in self._current_batch_cache:\n embed = self._current_batch_cache[cache]\n else:\n full_embed = self._compute_embed_fn(path, x, idx)\n try:\n with flashy.utils.write_and_rename(cache, pid=True) as f:\n torch.save(full_embed.cpu(), f)\n except Exception as exc:\n logger.error('Error saving embed %s (%s): %r', cache, full_embed.shape, exc)\n else:\n logger.info('New embed cache saved: %s (%s)', cache, full_embed.shape)\n embed = self._extract_embed_fn(full_embed, x, idx)\n embeds.append(embed)\n embed = torch.stack(embeds, dim=0)\n return embed\n\n def populate_embed_cache(self, paths: tp.List[Path], x: tp.Any) -> None:\n \"\"\"Populate in-memory caches for embeddings reading from the embeddings stored on disk.\n The in-memory caches consist in a cache for the full embedding and another cache for the\n final embedding chunk. 
Such caches are used to limit the IO access when computing the actual embeddings\n and reduce the IO footprint and synchronization points during forward passes.\n\n Args:\n paths (list[Path]): List of paths from where the embeddings can be loaded.\n x (any): Object from which the embedding is extracted.\n \"\"\"\n self._current_batch_cache.clear()\n if self.cache_path is not None:\n futures: list = []\n for path in paths:\n assert path is not None, \"Path is required for computation from cache\"\n cache = self._get_cache_path(path)\n if cache in self._memory_cache or not cache.exists():\n futures.append(None)\n else:\n futures.append(self.pool.submit(EmbeddingCache._get_full_embed_from_cache, cache))\n for idx, (path, future) in enumerate(zip(paths, futures)):\n assert path is not None\n cache = self._get_cache_path(path)\n full_embed = None\n if future is None:\n if cache in self._memory_cache:\n full_embed = self._memory_cache[cache]\n else:\n full_embed = future.result()\n if full_embed is not None:\n self._memory_cache[cache] = full_embed\n full_embed = full_embed.to(self.device)\n if full_embed is not None:\n embed = self._extract_embed_fn(full_embed, x, idx)\n self._current_batch_cache[cache] = embed" }, { "identifier": "collate", "path": "audiocraft/utils/utils.py", "snippet": "def collate(tensors: tp.List[torch.Tensor], dim: int = 0) -> tp.Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Get a list of tensors and collate them to a single tensor. according to the following logic:\n - `dim` specifies the time dimension which will be stacked and padded.\n - The output will contain 1 new dimension (dimension index 0) which will be the size of\n of the original list.\n\n Args:\n tensors (tp.List[torch.Tensor]): List of tensors to collate.\n dim (int): Dimension which will be stacked and padded.\n Returns:\n tp.Tuple[torch.Tensor, torch.Tensor]:\n torch.Tensor: Stacked and padded tensor. The output will contain 1 new dimension\n (dimension index 0) which will be the size of the original list.\n torch.Tensor: Tensor containing length of original tensor sizes (without padding).\n \"\"\"\n tensors = [x.transpose(0, dim) for x in tensors]\n lens = torch.LongTensor([len(x) for x in tensors])\n padded_tensors = pad_sequence(tensors)\n padded_tensors = padded_tensors.transpose(0, 1)\n padded_tensors = padded_tensors.transpose(1, dim + 1)\n return padded_tensors, lens" }, { "identifier": "hash_trick", "path": "audiocraft/utils/utils.py", "snippet": "def hash_trick(word: str, vocab_size: int) -> int:\n \"\"\"Hash trick to pair each word with an index\n\n Args:\n word (str): word we wish to convert to an index\n vocab_size (int): size of the vocabulary\n Returns:\n int: index of the word in the embedding LUT\n \"\"\"\n hash = int(hashlib.sha256(word.encode(\"utf-8\")).hexdigest(), 16)\n return hash % vocab_size" }, { "identifier": "length_to_mask", "path": "audiocraft/utils/utils.py", "snippet": "def length_to_mask(lengths: torch.Tensor, max_len: tp.Optional[int] = None) -> torch.Tensor:\n \"\"\"Utility function to convert a tensor of sequence lengths to a mask (useful when working on padded sequences).\n For example: [3, 5] => [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]]\n\n Args:\n lengths (torch.Tensor): tensor with lengths\n max_len (int): can set the max length manually. 
Defaults to None.\n Returns:\n torch.Tensor: mask with 0s where there is pad tokens else 1s\n \"\"\"\n assert len(lengths.shape) == 1, \"Length shape should be 1 dimensional.\"\n final_length = lengths.max().item() if not max_len else max_len\n final_length = max(final_length, 1) # if all seqs are of len zero we don't want a zero-size tensor\n return torch.arange(final_length, device=lengths.device)[None, :] < lengths[:, None]" }, { "identifier": "load_clap_state_dict", "path": "audiocraft/utils/utils.py", "snippet": "def load_clap_state_dict(clap_model, path: tp.Union[str, Path]):\n \"\"\"Wrapper around state dict loading of CLAP model\n addressing compatibility issues between CLAP and AudioCraft\n HuggingFace transformer version.\n See: https://github.com/LAION-AI/CLAP/issues/118\n \"\"\"\n from clap_module.factory import load_state_dict # type: ignore\n pkg = load_state_dict(path)\n pkg.pop('text_branch.embeddings.position_ids', None)\n clap_model.model.load_state_dict(pkg)" }, { "identifier": "warn_once", "path": "audiocraft/utils/utils.py", "snippet": "@lru_cache(None)\ndef warn_once(logger, msg):\n \"\"\"Warn about a given message only once.\"\"\"\n logger.warning(msg)" }, { "identifier": "chords", "path": "audiocraft/modules/btc/utils/chords.py", "snippet": "def chords(self, labels):\n\n \"\"\"\n Transform a list of chord labels into an array of internal numeric\n representations.\n\n Parameters\n ----------\n labels : list\n List of chord labels (str).\n\n Returns\n -------\n chords : numpy.array\n Structured array with columns 'root', 'bass', and 'intervals',\n containing a numeric representation of chords.\n\n \"\"\"\n crds = np.zeros(len(labels), dtype=CHORD_DTYPE)\n cache = {}\n for i, lbl in enumerate(labels):\n cv = cache.get(lbl, None)\n if cv is None:\n cv = self.chord(lbl)\n cache[lbl] = cv\n crds[i] = cv\n\n return crds" } ]
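Two of the utility snippets in this context list, collate and length_to_mask, do the padding and masking work that the conditioners rely on when batching variable-length conditions. The example below restates them locally (rather than importing audiocraft) and shows the shapes they produce:

```python
import torch
from torch.nn.utils.rnn import pad_sequence

def collate(tensors, dim=0):
    # Pad a list of tensors along `dim`; return the padded batch plus original lengths.
    tensors = [x.transpose(0, dim) for x in tensors]
    lens = torch.LongTensor([len(x) for x in tensors])
    padded = pad_sequence(tensors).transpose(0, 1).transpose(1, dim + 1)
    return padded, lens

def length_to_mask(lengths, max_len=None):
    # [3, 5] -> [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]]
    final_len = int(lengths.max().item()) if not max_len else max_len
    final_len = max(final_len, 1)
    return torch.arange(final_len, device=lengths.device)[None, :] < lengths[:, None]

seqs = [torch.randn(3, 8), torch.randn(5, 8)]   # two sequences, time on dim 0
padded, lens = collate(seqs, dim=0)             # padded: [2, 5, 8], lens: [3, 5]
mask = length_to_mask(lens)                     # boolean mask, zeros over the padding
print(padded.shape, mask)
```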
from collections import defaultdict from copy import deepcopy from dataclasses import dataclass, field from itertools import chain from pathlib import Path from num2words import num2words from transformers import RobertaTokenizer, T5EncoderModel, T5Tokenizer # type: ignore from torch import nn from torch.nn.utils.rnn import pad_sequence from .chroma import ChromaExtractor from .chord_chroma import ChordExtractor from .streaming import StreamingModule from .transformer import create_sin_embedding from ..data.audio import audio_read from ..data.audio_dataset import SegmentInfo from ..data.audio_utils import convert_audio from ..environment import AudioCraftEnvironment from ..quantization import ResidualVectorQuantizer from ..utils.autocast import TorchAutocast from ..utils.cache import EmbeddingCache from ..utils.utils import collate, hash_trick, length_to_mask, load_clap_state_dict, warn_once from .btc.utils import chords from demucs import pretrained from audiocraft.data.audio_dataset import AudioDataset from demucs.apply import apply_model from demucs.audio import convert_audio from demucs import pretrained from audiocraft.data.audio_dataset import AudioDataset from demucs.apply import apply_model from demucs.audio import convert_audio import logging import math import random import re import typing as tp import warnings import einops import spacy import torch import torch.nn.functional as F import numpy as np import laion_clap # type: ignore
token_num: 13,735
if match_len_on_eval: self._use_masking = False self.duration = duration self.__dict__['demucs'] = pretrained.get_model('htdemucs').to(device) stem_sources: list = self.demucs.sources # type: ignore self.stem_indices = torch.LongTensor([stem_sources.index('vocals'), stem_sources.index('other')]).to(device) self.chroma = ChromaExtractor(sample_rate=sample_rate, n_chroma=n_chroma, radix2_exp=radix2_exp, **kwargs).to(device) self.chroma_len = self._get_chroma_len() self.eval_wavs: tp.Optional[torch.Tensor] = self._load_eval_wavs(eval_wavs, n_eval_wavs) self.cache = None if cache_path is not None: self.cache = EmbeddingCache(Path(cache_path) / 'wav', self.device, compute_embed_fn=self._get_full_chroma_for_cache, extract_embed_fn=self._extract_chroma_chunk) def _downsampling_factor(self) -> int: return self.chroma.winhop def _load_eval_wavs(self, path: tp.Optional[str], num_samples: int) -> tp.Optional[torch.Tensor]: """Load pre-defined waveforms from a json. These waveforms will be used for chroma extraction during evaluation. This is done to make the evaluation on MusicCaps fair (we shouldn't see the chromas of MusicCaps). """ if path is None: return None logger.info(f"Loading evaluation wavs from {path}") dataset: AudioDataset = AudioDataset.from_meta( path, segment_duration=self.duration, min_audio_duration=self.duration, sample_rate=self.sample_rate, channels=1) if len(dataset) > 0: eval_wavs = dataset.collater([dataset[i] for i in range(num_samples)]).to(self.device) logger.info(f"Using {len(eval_wavs)} evaluation wavs for chroma-stem conditioner") return eval_wavs else: raise ValueError("Could not find evaluation wavs, check lengths of wavs") def reset_eval_wavs(self, eval_wavs: tp.Optional[torch.Tensor]) -> None: self.eval_wavs = eval_wavs def has_eval_wavs(self) -> bool: return self.eval_wavs is not None def _sample_eval_wavs(self, num_samples: int) -> torch.Tensor: """Sample wavs from a predefined list.""" assert self.eval_wavs is not None, "Cannot sample eval wavs as no eval wavs provided." 
total_eval_wavs = len(self.eval_wavs) out = self.eval_wavs if num_samples > total_eval_wavs: out = self.eval_wavs.repeat(num_samples // total_eval_wavs + 1, 1, 1) return out[torch.randperm(len(out))][:num_samples] def _get_chroma_len(self) -> int: """Get length of chroma during training.""" dummy_wav = torch.zeros((1, int(self.sample_rate * self.duration)), device=self.device) dummy_chr = self.chroma(dummy_wav) return dummy_chr.shape[1] @torch.no_grad() def _get_stemmed_wav(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: """Get parts of the wav that holds the melody, extracting the main stems from the wav.""" with self.autocast: wav = convert_audio( wav, sample_rate, self.demucs.samplerate, self.demucs.audio_channels) # type: ignore stems = apply_model(self.demucs, wav, device=self.device) stems = stems[:, self.stem_indices] # extract relevant stems for melody conditioning mix_wav = stems.sum(1) # merge extracted stems to single waveform mix_wav = convert_audio(mix_wav, self.demucs.samplerate, self.sample_rate, 1) # type: ignore return mix_wav @torch.no_grad() def _extract_chroma(self, wav: torch.Tensor) -> torch.Tensor: """Extract chroma features from the waveform.""" with self.autocast: return self.chroma(wav) @torch.no_grad() def _compute_wav_embedding(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: """Compute wav embedding, applying stem and chroma extraction.""" # avoid 0-size tensors when we are working with null conds if wav.shape[-1] == 1: return self._extract_chroma(wav) stems = self._get_stemmed_wav(wav, sample_rate) chroma = self._extract_chroma(stems) return chroma @torch.no_grad() def _get_full_chroma_for_cache(self, path: tp.Union[str, Path], x: WavCondition, idx: int) -> torch.Tensor: """Extract chroma from the whole audio waveform at the given path.""" wav, sr = audio_read(path) wav = wav[None].to(self.device) wav = convert_audio(wav, sr, self.sample_rate, to_channels=1) chroma = self._compute_wav_embedding(wav, self.sample_rate)[0] return chroma def _extract_chroma_chunk(self, full_chroma: torch.Tensor, x: WavCondition, idx: int) -> torch.Tensor: """Extract a chunk of chroma from the full chroma derived from the full waveform.""" wav_length = x.wav.shape[-1] seek_time = x.seek_time[idx] assert seek_time is not None, ( "WavCondition seek_time is required " "when extracting chroma chunks from pre-computed chroma.") full_chroma = full_chroma.float() frame_rate = self.sample_rate / self._downsampling_factor() target_length = int(frame_rate * wav_length / self.sample_rate) index = int(frame_rate * seek_time) out = full_chroma[index: index + target_length] out = F.pad(out[None], (0, 0, 0, target_length - out.shape[0]))[0] return out.to(self.device) @torch.no_grad() def _get_wav_embedding(self, x: WavCondition) -> torch.Tensor: """Get the wav embedding from the WavCondition. The conditioner will either extract the embedding on-the-fly computing it from the condition wav directly or will rely on the embedding cache to load the pre-computed embedding if relevant. """ sampled_wav: tp.Optional[torch.Tensor] = None if not self.training and self.eval_wavs is not None:
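The conditioner code above leans on EmbeddingCache (defined in the context list) to avoid recomputing chroma for whole files: compute_embed_fn builds the full per-file embedding once and stores it on disk, while extract_embed_fn slices out the chunk matching the sampled segment. A hedged usage sketch, assuming audiocraft and its dependencies are installed; the file names, shapes, and compute/extract functions are made-up stand-ins for the real chroma pipeline.

```python
from pathlib import Path
import torch
from audiocraft.utils.cache import EmbeddingCache

def compute_full_embed(path: Path, x, idx: int) -> torch.Tensor:
    # The real conditioner reads the whole wav at `path` and runs chroma extraction;
    # here we just fabricate a full-length embedding.
    return torch.randn(1000, 12)

def extract_chunk(full_embed: torch.Tensor, x, idx: int) -> torch.Tensor:
    # The real extractor uses x.seek_time[idx] and the chroma frame rate to pick the slice.
    return full_embed[:235]

cache = EmbeddingCache(cache_path=Path("/tmp/chroma_cache"), device="cpu",
                       compute_embed_fn=compute_full_embed,
                       extract_embed_fn=extract_chunk)

paths = [Path("track_a.wav"), Path("track_b.wav")]   # hypothetical file names
embeds = cache.get_embed_from_cache(paths, x=None)   # first call computes and caches
print(embeds.shape)                                  # -> torch.Size([2, 235, 12])
```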
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. logger = logging.getLogger(__name__) TextCondition = tp.Optional[str] # a text condition can be a string or None (if doesn't exist) ConditionType = tp.Tuple[torch.Tensor, torch.Tensor] # condition, mask class WavCondition(tp.NamedTuple): wav: torch.Tensor length: torch.Tensor sample_rate: tp.List[int] path: tp.List[tp.Optional[str]] = [] seek_time: tp.List[tp.Optional[float]] = [] class WavChordTextCondition(tp.NamedTuple): wav: tp.Union[torch.Tensor,str,tp.List[str]] length: torch.Tensor sample_rate: tp.List[int] path: tp.List[tp.Optional[str]] = [] seek_time: tp.List[tp.Optional[float]] = [] bpm : tp.List[tp.Optional[tp.Union[int, float]]] = [] meter : tp.List[tp.Optional[int]] = [] class JointEmbedCondition(tp.NamedTuple): wav: torch.Tensor text: tp.List[tp.Optional[str]] length: torch.Tensor sample_rate: tp.List[int] path: tp.List[tp.Optional[str]] = [] seek_time: tp.List[tp.Optional[float]] = [] @dataclass class ConditioningAttributes: text: tp.Dict[str, tp.Optional[str]] = field(default_factory=dict) wav: tp.Dict[str, tp.Union[WavCondition,WavChordTextCondition]] = field(default_factory=dict) joint_embed: tp.Dict[str, JointEmbedCondition] = field(default_factory=dict) def __getitem__(self, item): return getattr(self, item) @property def text_attributes(self): return self.text.keys() @property def wav_attributes(self): return self.wav.keys() @property def joint_embed_attributes(self): return self.joint_embed.keys() @property def attributes(self): return { "text": self.text_attributes, "wav": self.wav_attributes, "joint_embed": self.joint_embed_attributes, } def to_flat_dict(self): return { **{f"text.{k}": v for k, v in self.text.items()}, **{f"wav.{k}": v for k, v in self.wav.items()}, **{f"joint_embed.{k}": v for k, v in self.joint_embed.items()} } @classmethod def from_flat_dict(cls, x): out = cls() for k, v in x.items(): kind, att = k.split(".") out[kind][att] = v return out class SegmentWithAttributes(SegmentInfo): """Base class for all dataclasses that are used for conditioning. All child classes should implement `to_condition_attributes` that converts the existing attributes to a dataclass of type ConditioningAttributes. """ def to_condition_attributes(self) -> ConditioningAttributes: raise NotImplementedError() def nullify_condition(condition: ConditionType, dim: int = 1): """Transform an input condition to a null condition. The way it is done by converting it to a single zero vector similarly to how it is done inside WhiteSpaceTokenizer and NoopTokenizer. Args: condition (ConditionType): A tuple of condition and mask (tuple[torch.Tensor, torch.Tensor]) dim (int): The dimension that will be truncated (should be the time dimension) WARNING!: dim should not be the batch dimension! Returns: ConditionType: A tuple of null condition and mask """ assert dim != 0, "dim cannot be the batch dimension!" assert isinstance(condition, tuple) and \ isinstance(condition[0], torch.Tensor) and \ isinstance(condition[1], torch.Tensor), "'nullify_condition' got an unexpected input type!" cond, mask = condition B = cond.shape[0] last_dim = cond.dim() - 1 out = cond.transpose(dim, last_dim) out = 0. 
* out[..., :1] out = out.transpose(dim, last_dim) mask = torch.zeros((B, 1), device=out.device).int() assert cond.dim() == out.dim() return out, mask def nullify_wav(cond: tp.Union[WavCondition,WavChordTextCondition]) -> tp.Union[WavCondition,WavChordTextCondition]: """Transform a WavCondition to a nullified WavCondition. It replaces the wav by a null tensor, forces its length to 0, and replaces metadata by dummy attributes. Args: cond (WavCondition): Wav condition with wav, tensor of shape [B, T]. Returns: WavCondition: Nullified wav condition. """ if not isinstance(cond, WavChordTextCondition): null_wav, _ = nullify_condition((cond.wav, torch.zeros_like(cond.wav)), dim=cond.wav.dim() - 1) return WavCondition( wav=null_wav, length=torch.tensor([0] * cond.wav.shape[0], device=cond.wav.device), sample_rate=cond.sample_rate, path=[None] * cond.wav.shape[0], seek_time=[None] * cond.wav.shape[0], ) else: return WavChordTextCondition( wav=['N']* len(cond.wav), length=torch.tensor([0] * len(cond.wav), device=cond.length.device), sample_rate=cond.sample_rate, path=[None], seek_time=[None], bpm = cond.bpm, meter = cond.meter ) def nullify_joint_embed(embed: JointEmbedCondition) -> JointEmbedCondition: """Nullify the joint embedding condition by replacing it by a null tensor, forcing its length to 0, and replacing metadata by dummy attributes. Args: cond (JointEmbedCondition): Joint embedding condition with wav and text, wav tensor of shape [B, C, T]. """ null_wav, _ = nullify_condition((embed.wav, torch.zeros_like(embed.wav)), dim=embed.wav.dim() - 1) return JointEmbedCondition( wav=null_wav, text=[None] * len(embed.text), length=torch.LongTensor([0]).to(embed.wav.device), sample_rate=embed.sample_rate, path=[None] * embed.wav.shape[0], seek_time=[0] * embed.wav.shape[0], ) class Tokenizer: """Base tokenizer implementation (in case we want to introduce more advances tokenizers in the future). """ def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: raise NotImplementedError() class WhiteSpaceTokenizer(Tokenizer): """This tokenizer should be used for natural language descriptions. For example: ["he didn't, know he's going home.", 'shorter sentence'] => [[78, 62, 31, 4, 78, 25, 19, 34], [59, 77, 0, 0, 0, 0, 0, 0]] """ PUNCTUATION = "?:!.,;" def __init__(self, n_bins: int, pad_idx: int = 0, language: str = "en_core_web_sm", lemma: bool = True, stopwords: bool = True) -> None: self.n_bins = n_bins self.pad_idx = pad_idx self.lemma = lemma self.stopwords = stopwords try: self.nlp = spacy.load(language) except IOError: spacy.cli.download(language) # type: ignore self.nlp = spacy.load(language) @tp.no_type_check def __call__(self, texts: tp.List[tp.Optional[str]], return_text: bool = False) -> tp.Tuple[torch.Tensor, torch.Tensor]: """Take a list of strings and convert them to a tensor of indices. Args: texts (list[str]): List of strings. return_text (bool, optional): Whether to return text as additional tuple item. Defaults to False. Returns: tuple[torch.Tensor, torch.Tensor]: - Indices of words in the LUT. 
- And a mask indicating where the padding tokens are """ output, lengths = [], [] texts = deepcopy(texts) for i, text in enumerate(texts): # if current sample doesn't have a certain attribute, replace with pad token if text is None: output.append(torch.Tensor([self.pad_idx])) lengths.append(0) continue # convert numbers to words text = re.sub(r"(\d+)", lambda x: num2words(int(x.group(0))), text) # type: ignore # normalize text text = self.nlp(text) # type: ignore # remove stopwords if self.stopwords: text = [w for w in text if not w.is_stop] # type: ignore # remove punctuation text = [w for w in text if w.text not in self.PUNCTUATION] # type: ignore # lemmatize if needed text = [getattr(t, "lemma_" if self.lemma else "text") for t in text] # type: ignore texts[i] = " ".join(text) lengths.append(len(text)) # convert to tensor tokens = torch.Tensor([hash_trick(w, self.n_bins) for w in text]) output.append(tokens) mask = length_to_mask(torch.IntTensor(lengths)).int() padded_output = pad_sequence(output, padding_value=self.pad_idx).int().t() if return_text: return padded_output, mask, texts # type: ignore return padded_output, mask class NoopTokenizer(Tokenizer): """This tokenizer should be used for global conditioners such as: artist, genre, key, etc. The difference between this and WhiteSpaceTokenizer is that NoopTokenizer does not split strings, so "Jeff Buckley" will get it's own index. Whereas WhiteSpaceTokenizer will split it to ["Jeff", "Buckley"] and return an index per word. For example: ["Queen", "ABBA", "Jeff Buckley"] => [43, 55, 101] ["Metal", "Rock", "Classical"] => [0, 223, 51] """ def __init__(self, n_bins: int, pad_idx: int = 0): self.n_bins = n_bins self.pad_idx = pad_idx def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: output, lengths = [], [] for text in texts: # if current sample doesn't have a certain attribute, replace with pad token if text is None: output.append(self.pad_idx) lengths.append(0) else: output.append(hash_trick(text, self.n_bins)) lengths.append(1) tokens = torch.LongTensor(output).unsqueeze(1) mask = length_to_mask(torch.IntTensor(lengths)).int() return tokens, mask class BaseConditioner(nn.Module): """Base model for all conditioner modules. We allow the output dim to be different than the hidden dim for two reasons: 1) keep our LUTs small when the vocab is large; 2) make all condition dims consistent. Args: dim (int): Hidden dim of the model. output_dim (int): Output dim of the conditioner. """ def __init__(self, dim: int, output_dim: int): super().__init__() self.dim = dim self.output_dim = output_dim self.output_proj = nn.Linear(dim, output_dim) def tokenize(self, *args, **kwargs) -> tp.Any: """Should be any part of the processing that will lead to a synchronization point, e.g. BPE tokenization with transfer to the GPU. The returned value will be saved and return later when calling forward(). """ raise NotImplementedError() def forward(self, inputs: tp.Any) -> ConditionType: """Gets input that should be used as conditioning (e.g, genre, description or a waveform). Outputs a ConditionType, after the input data was embedded as a dense vector. Returns: ConditionType: - A tensor of size [B, T, D] where B is the batch size, T is the length of the output embedding and D is the dimension of the embedding. - And a mask indicating where the padding tokens. """ raise NotImplementedError() class TextConditioner(BaseConditioner): ... class LUTConditioner(TextConditioner): """Lookup table TextConditioner. 
Args: n_bins (int): Number of bins. dim (int): Hidden dim of the model (text-encoder/LUT). output_dim (int): Output dim of the conditioner. tokenizer (str): Name of the tokenizer. pad_idx (int, optional): Index for padding token. Defaults to 0. """ def __init__(self, n_bins: int, dim: int, output_dim: int, tokenizer: str, pad_idx: int = 0): super().__init__(dim, output_dim) self.embed = nn.Embedding(n_bins, dim) self.tokenizer: Tokenizer if tokenizer == 'whitespace': self.tokenizer = WhiteSpaceTokenizer(n_bins, pad_idx=pad_idx) elif tokenizer == 'noop': self.tokenizer = NoopTokenizer(n_bins, pad_idx=pad_idx) else: raise ValueError(f"unrecognized tokenizer `{tokenizer}`.") def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: device = self.embed.weight.device tokens, mask = self.tokenizer(x) tokens, mask = tokens.to(device), mask.to(device) return tokens, mask def forward(self, inputs: tp.Tuple[torch.Tensor, torch.Tensor]) -> ConditionType: tokens, mask = inputs embeds = self.embed(tokens) embeds = self.output_proj(embeds) embeds = (embeds * mask.unsqueeze(-1)) return embeds, mask class T5Conditioner(TextConditioner): """T5-based TextConditioner. Args: name (str): Name of the T5 model. output_dim (int): Output dim of the conditioner. finetune (bool): Whether to fine-tune T5 at train time. device (str): Device for T5 Conditioner. autocast_dtype (tp.Optional[str], optional): Autocast dtype. word_dropout (float, optional): Word dropout probability. normalize_text (bool, optional): Whether to apply text normalization. """ MODELS = ["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b", "google/flan-t5-small", "google/flan-t5-base", "google/flan-t5-large", "google/flan-t5-xl", "google/flan-t5-xxl"] MODELS_DIMS = { "t5-small": 512, "t5-base": 768, "t5-large": 1024, "t5-3b": 1024, "t5-11b": 1024, "google/flan-t5-small": 512, "google/flan-t5-base": 768, "google/flan-t5-large": 1024, "google/flan-t5-3b": 1024, "google/flan-t5-11b": 1024, } def __init__(self, name: str, output_dim: int, finetune: bool, device: str, autocast_dtype: tp.Optional[str] = 'float32', word_dropout: float = 0., normalize_text: bool = False): assert name in self.MODELS, f"Unrecognized t5 model name (should in {self.MODELS})" super().__init__(self.MODELS_DIMS[name], output_dim) self.device = device self.name = name self.finetune = finetune self.word_dropout = word_dropout if autocast_dtype is None or self.device == 'cpu': self.autocast = TorchAutocast(enabled=False) if self.device != 'cpu': logger.warning("T5 has no autocast, this might lead to NaN") else: dtype = getattr(torch, autocast_dtype) assert isinstance(dtype, torch.dtype) logger.info(f"T5 will be evaluated with autocast as {autocast_dtype}") self.autocast = TorchAutocast(enabled=True, device_type=self.device, dtype=dtype) # Let's disable logging temporarily because T5 will vomit some errors otherwise. 
# thanks https://gist.github.com/simon-weber/7853144 previous_level = logging.root.manager.disable logging.disable(logging.ERROR) with warnings.catch_warnings(): warnings.simplefilter("ignore") try: self.t5_tokenizer = T5Tokenizer.from_pretrained(name) t5 = T5EncoderModel.from_pretrained(name).train(mode=finetune) finally: logging.disable(previous_level) if finetune: self.t5 = t5 else: # this makes sure that the t5 models is not part # of the saved checkpoint self.__dict__['t5'] = t5.to(device) self.normalize_text = normalize_text if normalize_text: self.text_normalizer = WhiteSpaceTokenizer(1, lemma=True, stopwords=True) def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Dict[str, torch.Tensor]: # if current sample doesn't have a certain attribute, replace with empty string entries: tp.List[str] = [xi if xi is not None else "" for xi in x] if self.normalize_text: _, _, entries = self.text_normalizer(entries, return_text=True) if self.word_dropout > 0. and self.training: new_entries = [] for entry in entries: words = [word for word in entry.split(" ") if random.random() >= self.word_dropout] new_entries.append(" ".join(words)) entries = new_entries empty_idx = torch.LongTensor([i for i, xi in enumerate(entries) if xi == ""]) inputs = self.t5_tokenizer(entries, return_tensors='pt', padding=True).to(self.device) mask = inputs['attention_mask'] mask[empty_idx, :] = 0 # zero-out index where the input is non-existant return inputs def forward(self, inputs: tp.Dict[str, torch.Tensor]) -> ConditionType: mask = inputs['attention_mask'] with torch.set_grad_enabled(self.finetune), self.autocast: embeds = self.t5(**inputs).last_hidden_state embeds = self.output_proj(embeds.to(self.output_proj.weight)) embeds = (embeds * mask.unsqueeze(-1)) return embeds, mask class WaveformConditioner(BaseConditioner): """Base class for all conditioners that take a waveform as input. Classes that inherit must implement `_get_wav_embedding` that outputs a continuous tensor, and `_downsampling_factor` that returns the down-sampling factor of the embedding model. Args: dim (int): The internal representation dimension. output_dim (int): Output dimension. device (tp.Union[torch.device, str]): Device. """ def __init__(self, dim: int, output_dim: int, device: tp.Union[torch.device, str]): super().__init__(dim, output_dim) self.device = device # if False no masking is done, used in ChromaStemConditioner when completing by periodicity a sample. self._use_masking = True def tokenize(self, x: WavCondition) -> WavCondition: wav, length, sample_rate, path, seek_time = x assert length is not None return WavCondition(wav.to(self.device), length.to(self.device), sample_rate, path, seek_time) def _get_wav_embedding(self, x: WavCondition) -> torch.Tensor: """Gets as input a WavCondition and returns a dense embedding.""" raise NotImplementedError() def _downsampling_factor(self): """Returns the downsampling factor of the embedding model.""" raise NotImplementedError() def forward(self, x: WavCondition) -> ConditionType: """Extract condition embedding and mask from a waveform and its metadata. Args: x (WavCondition): Waveform condition containing raw waveform and metadata. 
Returns: ConditionType: a dense vector representing the conditioning along with its mask """ wav, lengths, *_ = x with torch.no_grad(): embeds = self._get_wav_embedding(x) embeds = embeds.to(self.output_proj.weight) embeds = self.output_proj(embeds) if lengths is not None and self._use_masking: lengths = lengths / self._downsampling_factor() mask = length_to_mask(lengths, max_len=embeds.shape[1]).int() # type: ignore else: mask = torch.ones_like(embeds[..., 0]) embeds = (embeds * mask.unsqueeze(-1)) return embeds, mask class ChromaStemConditioner(WaveformConditioner): """Chroma conditioner based on stems. The ChromaStemConditioner uses DEMUCS to first filter out drums and bass, as the drums and bass often dominate the chroma leading to the chroma features not containing information about the melody. Args: output_dim (int): Output dimension for the conditioner. sample_rate (int): Sample rate for the chroma extractor. n_chroma (int): Number of chroma bins for the chroma extractor. radix2_exp (int): Size of stft window for the chroma extractor (power of 2, e.g. 12 -> 2^12). duration (int): duration used during training. This is later used for correct padding in case we are using chroma as prefix. match_len_on_eval (bool, optional): if True then all chromas are padded to the training duration. Defaults to False. eval_wavs (str, optional): path to a dataset manifest with waveform, this waveforms are used as conditions during eval (for cases where we don't want to leak test conditions like MusicCaps). Defaults to None. n_eval_wavs (int, optional): limits the number of waveforms used for conditioning. Defaults to 0. device (tp.Union[torch.device, str], optional): Device for the conditioner. **kwargs: Additional parameters for the chroma extractor. """ def __init__(self, output_dim: int, sample_rate: int, n_chroma: int, radix2_exp: int, duration: float, match_len_on_eval: bool = True, eval_wavs: tp.Optional[str] = None, n_eval_wavs: int = 0, cache_path: tp.Optional[tp.Union[str, Path]] = None, device: tp.Union[torch.device, str] = 'cpu', **kwargs): super().__init__(dim=n_chroma, output_dim=output_dim, device=device) self.autocast = TorchAutocast(enabled=device != 'cpu', device_type=self.device, dtype=torch.float32) self.sample_rate = sample_rate self.match_len_on_eval = match_len_on_eval if match_len_on_eval: self._use_masking = False self.duration = duration self.__dict__['demucs'] = pretrained.get_model('htdemucs').to(device) stem_sources: list = self.demucs.sources # type: ignore self.stem_indices = torch.LongTensor([stem_sources.index('vocals'), stem_sources.index('other')]).to(device) self.chroma = ChromaExtractor(sample_rate=sample_rate, n_chroma=n_chroma, radix2_exp=radix2_exp, **kwargs).to(device) self.chroma_len = self._get_chroma_len() self.eval_wavs: tp.Optional[torch.Tensor] = self._load_eval_wavs(eval_wavs, n_eval_wavs) self.cache = None if cache_path is not None: self.cache = EmbeddingCache(Path(cache_path) / 'wav', self.device, compute_embed_fn=self._get_full_chroma_for_cache, extract_embed_fn=self._extract_chroma_chunk) def _downsampling_factor(self) -> int: return self.chroma.winhop def _load_eval_wavs(self, path: tp.Optional[str], num_samples: int) -> tp.Optional[torch.Tensor]: """Load pre-defined waveforms from a json. These waveforms will be used for chroma extraction during evaluation. This is done to make the evaluation on MusicCaps fair (we shouldn't see the chromas of MusicCaps). 
""" if path is None: return None logger.info(f"Loading evaluation wavs from {path}") dataset: AudioDataset = AudioDataset.from_meta( path, segment_duration=self.duration, min_audio_duration=self.duration, sample_rate=self.sample_rate, channels=1) if len(dataset) > 0: eval_wavs = dataset.collater([dataset[i] for i in range(num_samples)]).to(self.device) logger.info(f"Using {len(eval_wavs)} evaluation wavs for chroma-stem conditioner") return eval_wavs else: raise ValueError("Could not find evaluation wavs, check lengths of wavs") def reset_eval_wavs(self, eval_wavs: tp.Optional[torch.Tensor]) -> None: self.eval_wavs = eval_wavs def has_eval_wavs(self) -> bool: return self.eval_wavs is not None def _sample_eval_wavs(self, num_samples: int) -> torch.Tensor: """Sample wavs from a predefined list.""" assert self.eval_wavs is not None, "Cannot sample eval wavs as no eval wavs provided." total_eval_wavs = len(self.eval_wavs) out = self.eval_wavs if num_samples > total_eval_wavs: out = self.eval_wavs.repeat(num_samples // total_eval_wavs + 1, 1, 1) return out[torch.randperm(len(out))][:num_samples] def _get_chroma_len(self) -> int: """Get length of chroma during training.""" dummy_wav = torch.zeros((1, int(self.sample_rate * self.duration)), device=self.device) dummy_chr = self.chroma(dummy_wav) return dummy_chr.shape[1] @torch.no_grad() def _get_stemmed_wav(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: """Get parts of the wav that holds the melody, extracting the main stems from the wav.""" with self.autocast: wav = convert_audio( wav, sample_rate, self.demucs.samplerate, self.demucs.audio_channels) # type: ignore stems = apply_model(self.demucs, wav, device=self.device) stems = stems[:, self.stem_indices] # extract relevant stems for melody conditioning mix_wav = stems.sum(1) # merge extracted stems to single waveform mix_wav = convert_audio(mix_wav, self.demucs.samplerate, self.sample_rate, 1) # type: ignore return mix_wav @torch.no_grad() def _extract_chroma(self, wav: torch.Tensor) -> torch.Tensor: """Extract chroma features from the waveform.""" with self.autocast: return self.chroma(wav) @torch.no_grad() def _compute_wav_embedding(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: """Compute wav embedding, applying stem and chroma extraction.""" # avoid 0-size tensors when we are working with null conds if wav.shape[-1] == 1: return self._extract_chroma(wav) stems = self._get_stemmed_wav(wav, sample_rate) chroma = self._extract_chroma(stems) return chroma @torch.no_grad() def _get_full_chroma_for_cache(self, path: tp.Union[str, Path], x: WavCondition, idx: int) -> torch.Tensor: """Extract chroma from the whole audio waveform at the given path.""" wav, sr = audio_read(path) wav = wav[None].to(self.device) wav = convert_audio(wav, sr, self.sample_rate, to_channels=1) chroma = self._compute_wav_embedding(wav, self.sample_rate)[0] return chroma def _extract_chroma_chunk(self, full_chroma: torch.Tensor, x: WavCondition, idx: int) -> torch.Tensor: """Extract a chunk of chroma from the full chroma derived from the full waveform.""" wav_length = x.wav.shape[-1] seek_time = x.seek_time[idx] assert seek_time is not None, ( "WavCondition seek_time is required " "when extracting chroma chunks from pre-computed chroma.") full_chroma = full_chroma.float() frame_rate = self.sample_rate / self._downsampling_factor() target_length = int(frame_rate * wav_length / self.sample_rate) index = int(frame_rate * seek_time) out = full_chroma[index: index + target_length] out = F.pad(out[None], 
(0, 0, 0, target_length - out.shape[0]))[0] return out.to(self.device) @torch.no_grad() def _get_wav_embedding(self, x: WavCondition) -> torch.Tensor: """Get the wav embedding from the WavCondition. The conditioner will either extract the embedding on-the-fly computing it from the condition wav directly or will rely on the embedding cache to load the pre-computed embedding if relevant. """ sampled_wav: tp.Optional[torch.Tensor] = None if not self.training and self.eval_wavs is not None:
warn_once(logger, "Using precomputed evaluation wavs!")
15
2023-10-09 09:52:24+00:00
16k
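The record above closes with the single ground-truth continuation line, its gold snippet index, a creation timestamp, and a context-length bucket. As a rough illustration only (not part of the dataset itself), the sketch below shows one way such rows could be scored for next-line completion by exact match; the key names "cropped_code" and "next_line" and the complete_next_line callable are assumptions about how the rows would be loaded, not anything defined in this file.

from typing import Callable, Iterable


def exact_match_rate(rows: Iterable[dict], complete_next_line: Callable[[str], str]) -> float:
    """Fraction of rows whose predicted continuation matches the gold next line.

    Assumes each row dict carries the code prefix under "cropped_code" and the
    ground-truth continuation under "next_line".
    """
    hits = 0
    total = 0
    for row in rows:
        prediction = complete_next_line(row["cropped_code"])
        # Compare on stripped text so trailing whitespace does not count as a miss.
        hits += int(prediction.strip() == row["next_line"].strip())
        total += 1
    return hits / total if total else 0.0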
RVC-Project/Retrieval-based-Voice-Conversion
rvc/modules/vc/modules.py
[ { "identifier": "Config", "path": "rvc/configs/config.py", "snippet": "class Config:\n def __new__(cls):\n if not hasattr(cls, \"_instance\"):\n cls._instance = super().__new__(cls)\n return cls._instance\n\n def __init__(self):\n self.device: str = \"cuda:0\"\n self.is_half: bool = True\n self.use_jit: bool = False\n self.n_cpu: int = cpu_count()\n self.gpu_name: str | None = None\n self.json_config = self.load_config_json()\n self.gpu_mem: int | None = None\n self.instead: str | None = None\n (\n self.python_cmd,\n self.listen_port,\n self.noparallel,\n self.noautoopen,\n self.dml,\n ) = self.arg_parse()\n self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()\n\n @staticmethod\n def load_config_json() -> dict:\n return {\n config_file: json.load(open(config_file, \"r\"))\n for config_file in version_config_list\n }\n\n @staticmethod\n def arg_parse() -> tuple:\n parser: argparse.ArgumentParser = argparse.ArgumentParser()\n parser.add_argument(\"--port\", type=int, default=7865, help=\"Listen port\")\n parser.add_argument(\n \"--pycmd\",\n type=str,\n default=sys.executable or \"python\",\n help=\"Python command\",\n )\n parser.add_argument(\n \"--noparallel\", action=\"store_true\", help=\"Disable parallel processing\"\n )\n parser.add_argument(\n \"--noautoopen\",\n action=\"store_true\",\n help=\"Do not open in browser automatically\",\n )\n parser.add_argument(\n \"--dml\",\n action=\"store_true\",\n help=\"torch_dml\",\n )\n cmd_opts: argparse.Namespace = parser.parse_args()\n\n cmd_opts.port = cmd_opts.port if 0 <= cmd_opts.port <= 65535 else 7865\n\n return (\n cmd_opts.pycmd,\n cmd_opts.port,\n cmd_opts.noparallel,\n cmd_opts.noautoopen,\n cmd_opts.dml,\n )\n\n @staticmethod\n def has_mps() -> bool:\n return torch.backends.mps.is_available() and not torch.zeros(1).to(\n torch.device(\"mps\")\n )\n\n @staticmethod\n def has_xpu() -> bool:\n return hasattr(torch, \"xpu\") and torch.xpu.is_available()\n\n def use_fp32_config(self) -> None:\n for config_file, data in self.json_config.items():\n try:\n data[\"train\"][\"fp16_run\"] = False\n with open(config_file, \"w\") as json_file:\n json.dump(data, json_file, indent=4)\n except Exception as e:\n logger.info(f\"Error updating {config_file}: {str(e)}\")\n logger.info(\"overwrite configs.json\")\n\n def device_config(self) -> tuple:\n if torch.cuda.is_available():\n if self.has_xpu():\n self.device = self.instead = \"xpu:0\"\n self.is_half = True\n i_device = int(self.device.split(\":\")[-1])\n self.gpu_name = torch.cuda.get_device_name(i_device)\n if (\n (\"16\" in self.gpu_name and \"V100\" not in self.gpu_name.upper())\n or \"P40\" in self.gpu_name.upper()\n or \"P10\" in self.gpu_name.upper()\n or \"1060\" in self.gpu_name\n or \"1070\" in self.gpu_name\n or \"1080\" in self.gpu_name\n ):\n logger.info(f\"Found GPU {self.gpu_name}, force to fp32\")\n self.is_half = False\n self.use_fp32_config()\n else:\n logger.info(f\"Found GPU {self.gpu_name}\")\n self.gpu_mem = int(\n torch.cuda.get_device_properties(i_device).total_memory\n / 1024\n / 1024\n / 1024\n + 0.4\n )\n elif self.has_mps():\n logger.info(\"No supported Nvidia GPU found\")\n self.device = self.instead = \"mps\"\n self.is_half = False\n self.use_fp32_config()\n elif self.dml:\n import torch_directml\n\n self.device = torch_directml.device(torch_directml.default_device())\n self.is_half = False\n else:\n logger.info(\"No supported Nvidia GPU found\")\n self.device = self.instead = \"cpu\"\n self.is_half = False\n self.use_fp32_config()\n\n if 
self.gpu_mem is not None and self.gpu_mem <= 4:\n x_pad = 1\n x_query = 5\n x_center = 30\n x_max = 32\n elif self.is_half:\n # 6G PU_RAM conf\n x_pad = 3\n x_query = 10\n x_center = 60\n x_max = 65\n else:\n # 5G GPU_RAM conf\n x_pad = 1\n x_query = 6\n x_center = 38\n x_max = 41\n\n logger.info(f\"Use {self.dml or self.instead} instead\")\n logger.info(f\"is_half:{self.is_half}, device:{self.device}\")\n return x_pad, x_query, x_center, x_max" }, { "identifier": "load_audio", "path": "rvc/lib/audio.py", "snippet": "def load_audio(file, sr):\r\n if not os.path.exists(file):\r\n raise RuntimeError(\r\n \"You input a wrong audio path that does not exists, please fix it!\"\r\n )\r\n try:\r\n with open(file, \"rb\") as f:\r\n with BytesIO() as out:\r\n audio2(f, out, \"f32le\", sr)\r\n return np.frombuffer(out.getvalue(), np.float32).flatten()\r\n\r\n except AttributeError:\r\n audio = file[1] / 32768.0\r\n if len(audio.shape) == 2:\r\n audio = np.mean(audio, -1)\r\n return librosa.resample(audio, orig_sr=file[0], target_sr=16000)\r\n\r\n except Exception:\r\n raise RuntimeError(traceback.format_exc())\r" }, { "identifier": "wav2", "path": "rvc/lib/audio.py", "snippet": "def wav2(i, o, format):\r\n inp = av.open(i, \"rb\")\r\n if format == \"m4a\":\r\n format = \"mp4\"\r\n out = av.open(o, \"wb\", format=format)\r\n if format == \"ogg\":\r\n format = \"libvorbis\"\r\n if format == \"mp4\":\r\n format = \"aac\"\r\n\r\n ostream = out.add_stream(format)\r\n\r\n for frame in inp.decode(audio=0):\r\n for p in ostream.encode(frame):\r\n out.mux(p)\r\n\r\n for p in ostream.encode(None):\r\n out.mux(p)\r\n\r\n out.close()\r\n inp.close()\r" }, { "identifier": "SynthesizerTrnMs256NSFsid", "path": "rvc/lib/infer_pack/models.py", "snippet": "class SynthesizerTrnMs256NSFsid(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr,\n **kwargs\n ):\n super(SynthesizerTrnMs256NSFsid, self).__init__()\n if isinstance(sr, str):\n sr = sr2sr[sr]\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = float(p_dropout)\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder256(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n float(p_dropout),\n )\n self.dec = GeneratorNSF(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n sr=sr,\n is_half=kwargs[\"is_half\"],\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, 
hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n logger.debug(\n \"gin_channels: \"\n + str(gin_channels)\n + \", self.spk_embed_dim: \"\n + str(self.spk_embed_dim)\n )\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def __prepare_scriptable__(self):\n for hook in self.dec._forward_pre_hooks.values():\n # The hook we want to remove is an instance of WeightNorm class, so\n # normally we would do `if isinstance(...)` but this class is not accessible\n # because of shadowing, so we check the module name directly.\n # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.dec)\n for hook in self.flow._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.flow)\n if hasattr(self, \"enc_q\"):\n for hook in self.enc_q._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.enc_q)\n return self\n\n @torch.jit.ignore\n def forward(\n self,\n phone: torch.Tensor,\n phone_lengths: torch.Tensor,\n pitch: torch.Tensor,\n pitchf: torch.Tensor,\n y: torch.Tensor,\n y_lengths: torch.Tensor,\n ds: Optional[torch.Tensor] = None,\n ): # 这里ds是id,[bs,1]\n # print(1,pitch.shape)#[bs,t]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)\n pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)\n # print(-2,pitchf.shape,z_slice.shape)\n o = self.dec(z_slice, pitchf, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n @torch.jit.export\n def infer(\n self,\n phone: torch.Tensor,\n phone_lengths: torch.Tensor,\n pitch: torch.Tensor,\n nsff0: torch.Tensor,\n sid: torch.Tensor,\n rate: Optional[torch.Tensor] = None,\n ):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n if rate is not None:\n assert isinstance(rate, torch.Tensor)\n head = int(z_p.shape[2] * (1 - rate.item()))\n z_p = z_p[:, :, head:]\n x_mask = x_mask[:, :, head:]\n nsff0 = nsff0[:, head:]\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec(z * x_mask, nsff0, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "SynthesizerTrnMs256NSFsid_nono", "path": "rvc/lib/infer_pack/models.py", "snippet": "class SynthesizerTrnMs256NSFsid_nono(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr=None,\n **kwargs\n 
):\n super(SynthesizerTrnMs256NSFsid_nono, self).__init__()\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = float(p_dropout)\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder256(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n float(p_dropout),\n f0=False,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n logger.debug(\n \"gin_channels: \"\n + str(gin_channels)\n + \", self.spk_embed_dim: \"\n + str(self.spk_embed_dim)\n )\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def __prepare_scriptable__(self):\n for hook in self.dec._forward_pre_hooks.values():\n # The hook we want to remove is an instance of WeightNorm class, so\n # normally we would do `if isinstance(...)` but this class is not accessible\n # because of shadowing, so we check the module name directly.\n # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.dec)\n for hook in self.flow._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.flow)\n if hasattr(self, \"enc_q\"):\n for hook in self.enc_q._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.enc_q)\n return self\n\n @torch.jit.ignore\n def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n @torch.jit.export\n def infer(\n self,\n phone: torch.Tensor,\n phone_lengths: torch.Tensor,\n sid: torch.Tensor,\n rate: Optional[torch.Tensor] = None,\n ):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * 
torch.randn_like(m_p) * 0.66666) * x_mask\n if rate is not None:\n head = int(z_p.shape[2] * (1.0 - rate.item()))\n z_p = z_p[:, :, head:]\n x_mask = x_mask[:, :, head:]\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec(z * x_mask, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "SynthesizerTrnMs768NSFsid", "path": "rvc/lib/infer_pack/models.py", "snippet": "class SynthesizerTrnMs768NSFsid(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr,\n **kwargs\n ):\n super(SynthesizerTrnMs768NSFsid, self).__init__()\n if isinstance(sr, str):\n sr = sr2sr[sr]\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = float(p_dropout)\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder768(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n float(p_dropout),\n )\n self.dec = GeneratorNSF(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n sr=sr,\n is_half=kwargs[\"is_half\"],\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n logger.debug(\n \"gin_channels: \"\n + str(gin_channels)\n + \", self.spk_embed_dim: \"\n + str(self.spk_embed_dim)\n )\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def __prepare_scriptable__(self):\n for hook in self.dec._forward_pre_hooks.values():\n # The hook we want to remove is an instance of WeightNorm class, so\n # normally we would do `if isinstance(...)` but this class is not accessible\n # because of shadowing, so we check the module name directly.\n # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.dec)\n for hook in self.flow._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.flow)\n if hasattr(self, \"enc_q\"):\n for hook in self.enc_q._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n 
torch.nn.utils.remove_weight_norm(self.enc_q)\n return self\n\n @torch.jit.ignore\n def forward(\n self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds\n ): # 这里ds是id,[bs,1]\n # print(1,pitch.shape)#[bs,t]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)\n pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)\n # print(-2,pitchf.shape,z_slice.shape)\n o = self.dec(z_slice, pitchf, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n @torch.jit.export\n def infer(\n self,\n phone: torch.Tensor,\n phone_lengths: torch.Tensor,\n pitch: torch.Tensor,\n nsff0: torch.Tensor,\n sid: torch.Tensor,\n rate: Optional[torch.Tensor] = None,\n ):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n if rate is not None:\n head = int(z_p.shape[2] * (1.0 - rate.item()))\n z_p = z_p[:, :, head:]\n x_mask = x_mask[:, :, head:]\n nsff0 = nsff0[:, head:]\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec(z * x_mask, nsff0, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "SynthesizerTrnMs768NSFsid_nono", "path": "rvc/lib/infer_pack/models.py", "snippet": "class SynthesizerTrnMs768NSFsid_nono(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr=None,\n **kwargs\n ):\n super(SynthesizerTrnMs768NSFsid_nono, self).__init__()\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = float(p_dropout)\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder768(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n float(p_dropout),\n f0=False,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n logger.debug(\n \"gin_channels: \"\n + str(gin_channels)\n + \", 
self.spk_embed_dim: \"\n + str(self.spk_embed_dim)\n )\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def __prepare_scriptable__(self):\n for hook in self.dec._forward_pre_hooks.values():\n # The hook we want to remove is an instance of WeightNorm class, so\n # normally we would do `if isinstance(...)` but this class is not accessible\n # because of shadowing, so we check the module name directly.\n # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.dec)\n for hook in self.flow._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.flow)\n if hasattr(self, \"enc_q\"):\n for hook in self.enc_q._forward_pre_hooks.values():\n if (\n hook.__module__ == \"torch.nn.utils.weight_norm\"\n and hook.__class__.__name__ == \"WeightNorm\"\n ):\n torch.nn.utils.remove_weight_norm(self.enc_q)\n return self\n\n @torch.jit.ignore\n def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n @torch.jit.export\n def infer(\n self,\n phone: torch.Tensor,\n phone_lengths: torch.Tensor,\n sid: torch.Tensor,\n rate: Optional[torch.Tensor] = None,\n ):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n if rate is not None:\n head = int(z_p.shape[2] * (1.0 - rate.item()))\n z_p = z_p[:, :, head:]\n x_mask = x_mask[:, :, head:]\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec(z * x_mask, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "Pipeline", "path": "rvc/modules/vc/pipeline.py", "snippet": "class Pipeline(object):\n def __init__(self, tgt_sr, config):\n self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = (\n config.x_pad,\n config.x_query,\n config.x_center,\n config.x_max,\n config.is_half,\n )\n self.sr = 16000 # hubert输入采样率\n self.window = 160 # 每帧点数\n self.t_pad = self.sr * self.x_pad # 每条前后pad时间\n self.t_pad_tgt = tgt_sr * self.x_pad\n self.t_pad2 = self.t_pad * 2\n self.t_query = self.sr * self.x_query # 查询切点前后查询时间\n self.t_center = self.sr * self.x_center # 查询切点位置\n self.t_max = self.sr * self.x_max # 免查询时长阈值\n self.device = config.device\n\n def get_f0(\n self,\n input_audio_path,\n x,\n p_len,\n f0_up_key,\n f0_method,\n filter_radius,\n inp_f0=None,\n ):\n global input_audio_path2wav\n time_step = self.window / self.sr * 1000\n f0_min = 50\n f0_max = 1100\n f0_mel_min = 1127 * np.log(1 + f0_min / 700)\n f0_mel_max = 1127 * np.log(1 + f0_max / 700)\n if f0_method == \"pm\":\n f0 = (\n parselmouth.Sound(x, self.sr)\n .to_pitch_ac(\n time_step=time_step / 1000,\n voicing_threshold=0.6,\n pitch_floor=f0_min,\n pitch_ceiling=f0_max,\n )\n .selected_array[\"frequency\"]\n )\n pad_size = (p_len 
- len(f0) + 1) // 2\n if pad_size > 0 or p_len - len(f0) - pad_size > 0:\n f0 = np.pad(\n f0, [[pad_size, p_len - len(f0) - pad_size]], mode=\"constant\"\n )\n elif f0_method == \"harvest\":\n input_audio_path2wav[input_audio_path] = x.astype(np.double)\n f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10)\n if filter_radius > 2:\n f0 = signal.medfilt(f0, 3)\n elif f0_method == \"crepe\":\n model = \"full\"\n # Pick a batch size that doesn't cause memory errors on your gpu\n batch_size = 512\n # Compute pitch using first gpu\n audio = torch.tensor(np.copy(x))[None].float()\n f0, pd = torchcrepe.predict(\n audio,\n self.sr,\n self.window,\n f0_min,\n f0_max,\n model,\n batch_size=batch_size,\n device=self.device,\n return_periodicity=True,\n )\n pd = torchcrepe.filter.median(pd, 3)\n f0 = torchcrepe.filter.mean(f0, 3)\n f0[pd < 0.1] = 0\n f0 = f0[0].cpu().numpy()\n elif f0_method == \"rmvpe\":\n if not hasattr(self, \"model_rmvpe\"):\n from rvc.lib.rmvpe import RMVPE\n\n logger.info(\n \"Loading rmvpe model,%s\" % \"%s/rmvpe.pt\" % os.environ[\"rmvpe_root\"]\n )\n self.model_rmvpe = RMVPE(\n \"%s/rmvpe.pt\" % os.environ[\"rmvpe_root\"],\n is_half=self.is_half,\n device=self.device,\n )\n f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03)\n\n if \"privateuseone\" in str(self.device): # clean ortruntime memory\n del self.model_rmvpe.model\n del self.model_rmvpe\n logger.info(\"Cleaning ortruntime memory\")\n\n f0 *= pow(2, f0_up_key / 12)\n # with open(\"test.txt\",\"w\")as f:f.write(\"\\n\".join([str(i)for i in f0.tolist()]))\n tf0 = self.sr // self.window # 每秒f0点数\n if inp_f0 is not None:\n delta_t = np.round(\n (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1\n ).astype(\"int16\")\n replace_f0 = np.interp(\n list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]\n )\n shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0]\n f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[\n :shape\n ]\n # with open(\"test_opt.txt\",\"w\")as f:f.write(\"\\n\".join([str(i)for i in f0.tolist()]))\n f0bak = f0.copy()\n f0_mel = 1127 * np.log(1 + f0 / 700)\n f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (\n f0_mel_max - f0_mel_min\n ) + 1\n f0_mel[f0_mel <= 1] = 1\n f0_mel[f0_mel > 255] = 255\n f0_coarse = np.rint(f0_mel).astype(np.int32)\n return f0_coarse, f0bak # 1-0\n\n def vc(\n self,\n model,\n net_g,\n sid,\n audio0,\n pitch,\n pitchf,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n ): # ,file_index,file_big_npy\n feats = torch.from_numpy(audio0)\n if self.is_half:\n feats = feats.half()\n else:\n feats = feats.float()\n if feats.dim() == 2: # double channels\n feats = feats.mean(-1)\n assert feats.dim() == 1, feats.dim()\n feats = feats.view(1, -1)\n padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)\n\n inputs = {\n \"source\": feats.to(self.device),\n \"padding_mask\": padding_mask,\n \"output_layer\": 9 if version == \"v1\" else 12,\n }\n t0 = ttime()\n with torch.no_grad():\n logits = model.extract_features(**inputs)\n feats = model.final_proj(logits[0]) if version == \"v1\" else logits[0]\n if protect < 0.5 and pitch is not None and pitchf is not None:\n feats0 = feats.clone()\n if (\n not isinstance(index, type(None))\n and not isinstance(big_npy, type(None))\n and index_rate != 0\n ):\n npy = feats[0].cpu().numpy()\n if self.is_half:\n npy = npy.astype(\"float32\")\n\n # _, I = index.search(npy, 1)\n # npy = big_npy[I.squeeze()]\n\n score, ix = index.search(npy, k=8)\n 
weight = np.square(1 / score)\n weight /= weight.sum(axis=1, keepdims=True)\n npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)\n\n if self.is_half:\n npy = npy.astype(\"float16\")\n feats = (\n torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate\n + (1 - index_rate) * feats\n )\n\n feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)\n if protect < 0.5 and pitch is not None and pitchf is not None:\n feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute(\n 0, 2, 1\n )\n t1 = ttime()\n p_len = audio0.shape[0] // self.window\n if feats.shape[1] < p_len:\n p_len = feats.shape[1]\n if pitch is not None and pitchf is not None:\n pitch = pitch[:, :p_len]\n pitchf = pitchf[:, :p_len]\n\n if protect < 0.5 and pitch is not None and pitchf is not None:\n pitchff = pitchf.clone()\n pitchff[pitchf > 0] = 1\n pitchff[pitchf < 1] = protect\n pitchff = pitchff.unsqueeze(-1)\n feats = feats * pitchff + feats0 * (1 - pitchff)\n feats = feats.to(feats0.dtype)\n p_len = torch.tensor([p_len], device=self.device).long()\n with torch.no_grad():\n hasp = pitch is not None and pitchf is not None\n arg = (feats, p_len, pitch, pitchf, sid) if hasp else (feats, p_len, sid)\n audio1 = (net_g.infer(*arg)[0][0, 0]).data.cpu().float().numpy()\n del hasp, arg\n del feats, p_len, padding_mask\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n t2 = ttime()\n times[\"npy\"] += t1 - t0\n times[\"infer\"] += t2 - t1\n return audio1\n\n def pipeline(\n self,\n model,\n net_g,\n sid,\n audio,\n input_audio_path,\n times,\n f0_up_key,\n f0_method,\n file_index,\n index_rate,\n if_f0,\n filter_radius,\n tgt_sr,\n resample_sr,\n rms_mix_rate,\n version,\n protect,\n f0_file=None,\n ):\n if (\n file_index\n and file_index != \"\"\n # and file_big_npy != \"\"\n # and os.path.exists(file_big_npy) == True\n and os.path.exists(file_index)\n and index_rate != 0\n ):\n try:\n index = faiss.read_index(file_index)\n # big_npy = np.load(file_big_npy)\n big_npy = index.reconstruct_n(0, index.ntotal)\n except:\n traceback.print_exc()\n index = big_npy = None\n else:\n index = big_npy = None\n audio = signal.filtfilt(bh, ah, audio)\n audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode=\"reflect\")\n opt_ts = []\n if audio_pad.shape[0] > self.t_max:\n audio_sum = np.zeros_like(audio)\n for i in range(self.window):\n audio_sum += np.abs(audio_pad[i : i - self.window])\n for t in range(self.t_center, audio.shape[0], self.t_center):\n opt_ts.append(\n t\n - self.t_query\n + np.where(\n audio_sum[t - self.t_query : t + self.t_query]\n == audio_sum[t - self.t_query : t + self.t_query].min()\n )[0][0]\n )\n s = 0\n audio_opt = []\n t = None\n t1 = ttime()\n audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode=\"reflect\")\n p_len = audio_pad.shape[0] // self.window\n inp_f0 = None\n if hasattr(f0_file, \"name\"):\n try:\n with open(f0_file.name, \"r\") as f:\n lines = f.read().strip(\"\\n\").split(\"\\n\")\n inp_f0 = []\n for line in lines:\n inp_f0.append([float(i) for i in line.split(\",\")])\n inp_f0 = np.array(inp_f0, dtype=\"float32\")\n except:\n traceback.print_exc()\n sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()\n pitch, pitchf = None, None\n if if_f0 == 1:\n pitch, pitchf = self.get_f0(\n input_audio_path,\n audio_pad,\n p_len,\n f0_up_key,\n f0_method,\n filter_radius,\n inp_f0,\n )\n pitch = pitch[:p_len]\n pitchf = pitchf[:p_len]\n if \"mps\" not in str(self.device) or \"xpu\" not in str(self.device):\n pitchf = 
pitchf.astype(np.float32)\n pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()\n pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()\n t2 = ttime()\n times[\"f0\"] += t2 - t1\n for t in opt_ts:\n t = t // self.window * self.window\n if if_f0 == 1:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[s : t + self.t_pad2 + self.window],\n pitch[:, s // self.window : (t + self.t_pad2) // self.window],\n pitchf[:, s // self.window : (t + self.t_pad2) // self.window],\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n else:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[s : t + self.t_pad2 + self.window],\n None,\n None,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n s = t\n if if_f0 == 1:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[t:],\n pitch[:, t // self.window :] if t is not None else pitch,\n pitchf[:, t // self.window :] if t is not None else pitchf,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n else:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[t:],\n None,\n None,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n audio_opt = np.concatenate(audio_opt)\n if rms_mix_rate != 1:\n audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate)\n if tgt_sr != resample_sr >= 16000:\n audio_opt = librosa.resample(\n audio_opt, orig_sr=tgt_sr, target_sr=resample_sr\n )\n audio_max = np.abs(audio_opt).max() / 0.99\n max_int16 = 32768\n if audio_max > 1:\n max_int16 /= audio_max\n audio_opt = (audio_opt * max_int16).astype(np.int16)\n del pitch, pitchf, sid\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n return audio_opt" } ]
import logging import os import traceback import numpy as np import soundfile as sf import torch from collections import OrderedDict from io import BytesIO from pathlib import Path from rvc.configs.config import Config from rvc.lib.audio import load_audio, wav2 from rvc.lib.infer_pack.models import ( SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono, SynthesizerTrnMs768NSFsid, SynthesizerTrnMs768NSFsid_nono, ) from rvc.modules.vc.pipeline import Pipeline from rvc.modules.vc.utils import *
11,833
logger: logging.Logger = logging.getLogger(__name__) class VC: def __init__(self): self.n_spk: any = None self.tgt_sr: int | None = None self.net_g = None self.pipeline: Pipeline | None = None self.cpt: OrderedDict | None = None self.version: str | None = None self.if_f0: int | None = None self.version: str | None = None self.hubert_model: any = None
logger: logging.Logger = logging.getLogger(__name__) class VC: def __init__(self): self.n_spk: any = None self.tgt_sr: int | None = None self.net_g = None self.pipeline: Pipeline | None = None self.cpt: OrderedDict | None = None self.version: str | None = None self.if_f0: int | None = None self.version: str | None = None self.hubert_model: any = None
self.config = Config()
0
2023-10-14 09:52:31+00:00
16k
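The RVC record above ends with a pipeline that, when a faiss index is supplied, replaces each HuBERT feature with an inverse-square-distance weighted average of its k nearest stored features before mixing it back in via index_rate. Below is a minimal, self-contained sketch of just that retrieval-and-blend step, assuming numpy and faiss are installed; the dimensions, random arrays, and the index_rate value are illustrative placeholders, not values taken from the dataset.

import numpy as np
import faiss

dim, n_bank, n_query, k = 256, 1000, 4, 8
big_npy = np.random.rand(n_bank, dim).astype("float32")    # stored feature bank (placeholder data)
feats = np.random.rand(n_query, dim).astype("float32")     # query features to be retouched
index_rate = 0.75                                          # blend strength toward retrieved features

index = faiss.IndexFlatL2(dim)                             # exact L2 index over the feature bank
index.add(big_npy)

score, ix = index.search(feats, k)                         # squared L2 distances and neighbour ids, shape (n_query, k)
weight = np.square(1.0 / score)                            # inverse-square-distance weights, as in the snippet
weight /= weight.sum(axis=1, keepdims=True)                # normalise weights per query
retrieved = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)

blended = index_rate * retrieved + (1.0 - index_rate) * feats  # mix retrieved and original features
print(blended.shape)                                       # (n_query, dim)

With random placeholder data the distances are never exactly zero, so the 1/score division is safe here; the quoted pipeline shares the same caveat for exact matches.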
zhijie-group/LOVECon
video_diffusion/trainer/ddpm_trainer.py
[ { "identifier": "UNetPseudo3DConditionModel", "path": "video_diffusion/models/unet_3d_condition.py", "snippet": "class UNetPseudo3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlockPseudo3D\",\n \"CrossAttnDownBlockPseudo3D\",\n \"CrossAttnDownBlockPseudo3D\",\n \"DownBlockPseudo3D\",\n ),\n mid_block_type: str = \"UNetMidBlockPseudo3DCrossAttn\",\n up_block_types: Tuple[str] = (\n \"UpBlockPseudo3D\",\n \"CrossAttnUpBlockPseudo3D\",\n \"CrossAttnUpBlockPseudo3D\",\n \"CrossAttnUpBlockPseudo3D\",\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: int = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n **kwargs\n ):\n super().__init__()\n\n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n if 'temporal_downsample' in kwargs and kwargs['temporal_downsample'] is True:\n kwargs['temporal_downsample_time'] = 3\n self.temporal_downsample_time = kwargs.get('temporal_downsample_time', 0)\n \n # input\n self.conv_in = PseudoConv3d(in_channels, block_out_channels[0], \n kernel_size=3, padding=(1, 1), model_config=kwargs)\n\n # time\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n kwargs_copy=copy.deepcopy(kwargs)\n temporal_downsample_i = ((i >= (len(down_block_types)-self.temporal_downsample_time))\n and (not is_final_block))\n kwargs_copy.update({'temporal_downsample': temporal_downsample_i} )\n # kwargs_copy.update({'SparseCausalAttention_index': temporal_downsample_i} )\n if temporal_downsample_i:\n print(f'Initialize model temporal downsample at layer {i}')\n down_block = get_down_block(\n down_block_type,\n 
num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n model_config=kwargs_copy\n )\n self.down_blocks.append(down_block)\n # mid\n if mid_block_type == \"UNetMidBlockPseudo3DCrossAttn\":\n self.mid_block = UNetMidBlockPseudo3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n model_config=kwargs\n )\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n\n # count how many layers upsample the images\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n only_cross_attention = list(reversed(only_cross_attention))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n \n kwargs_copy=copy.deepcopy(kwargs)\n kwargs_copy.update({'temporal_downsample': \n i < (self.temporal_downsample_time-1)})\n if i < (self.temporal_downsample_time-1):\n print(f'Initialize model temporal updample at layer {i}')\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n model_config=kwargs_copy\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n self.conv_norm_out = nn.GroupNorm(\n num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps\n )\n self.conv_act = nn.SiLU()\n self.conv_out = PseudoConv3d(block_out_channels[0], out_channels, \n kernel_size=3, padding=1, model_config=kwargs)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input 
tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = (\n num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n )\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(\n module,\n (CrossAttnDownBlockPseudo3D, DownBlockPseudo3D, CrossAttnUpBlockPseudo3D, UpBlockPseudo3D),\n ):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None, # None\n attention_mask: Optional[torch.Tensor] = None, # None\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNetPseudo3DConditionOutput, Tuple]:\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when 
sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None: # None\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # 0. center input if necessary\n if self.config.center_input_sample: # False\n sample = 2 * sample - 1.0\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # 2. pre-process\n sample = self.conv_in(sample)\n\n # 3. down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb)\n\n down_block_res_samples += res_samples\n\n if down_block_additional_residuals is not None:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n new_down_block_res_samples += (down_block_res_sample + down_block_additional_residual,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # 4. 
mid\n sample = self.mid_block(\n sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n # for i in down_block_res_samples: print(i.shape) \n # torch.Size([1, 320, 16, 64, 64])\n # torch.Size([1, 320, 16, 64, 64])\n # torch.Size([1, 320, 16, 64, 64])\n # torch.Size([1, 320, 8, 32, 32])\n # torch.Size([1, 640, 8, 32, 32])\n # torch.Size([1, 640, 8, 32, 32])\n # torch.Size([1, 640, 4, 16, 16])\n # torch.Size([1, 1280, 4, 16, 16])\n # torch.Size([1, 1280, 4, 16, 16])\n # torch.Size([1, 1280, 2, 8, 8])\n # torch.Size([1, 1280, 2, 8, 8])\n # torch.Size([1, 1280, 2, 8, 8])\n if mid_block_additional_residual is not None:\n sample = sample + mid_block_additional_residual\n \n # 5. up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n upsample_size=upsample_size,\n )\n # 6. post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNetPseudo3DConditionOutput(sample=sample)\n\n @classmethod\n def from_2d_model(cls, model_path, model_config):\n config_path = os.path.join(model_path, \"config.json\")\n if not os.path.isfile(config_path):\n raise RuntimeError(f\"{config_path} does not exist\")\n with open(config_path, \"r\") as f:\n config = json.load(f)\n\n config.pop(\"_class_name\")\n config.pop(\"_diffusers_version\")\n\n block_replacer = {\n \"CrossAttnDownBlock2D\": \"CrossAttnDownBlockPseudo3D\",\n \"DownBlock2D\": \"DownBlockPseudo3D\",\n \"UpBlock2D\": \"UpBlockPseudo3D\",\n \"CrossAttnUpBlock2D\": \"CrossAttnUpBlockPseudo3D\",\n }\n\n def convert_2d_to_3d_block(block):\n return block_replacer[block] if block in block_replacer else block\n\n config[\"down_block_types\"] = [\n convert_2d_to_3d_block(block) for block in config[\"down_block_types\"]\n ]\n config[\"up_block_types\"] = [convert_2d_to_3d_block(block) for block in config[\"up_block_types\"]]\n if model_config is not None:\n config.update(model_config)\n\n model = cls(**config)\n\n state_dict_path_condidates = glob.glob(os.path.join(model_path, \"*.bin\"))\n if state_dict_path_condidates:\n state_dict = torch.load(state_dict_path_condidates[0], map_location=\"cpu\")\n model.load_2d_state_dict(state_dict=state_dict)\n\n return model\n\n def load_2d_state_dict(self, state_dict, **kwargs):\n state_dict_3d = self.state_dict()\n\n for k, v in state_dict.items():\n if k not in state_dict_3d:\n raise KeyError(f\"2d state_dict key {k} does not exist in 3d model\")\n elif v.shape != state_dict_3d[k].shape:\n raise ValueError(f\"state_dict shape mismatch, 2d {v.shape}, 3d {state_dict_3d[k].shape}\")\n\n for k, v in state_dict_3d.items():\n if \"_temporal\" in k:\n 
continue\n if k not in state_dict:\n raise KeyError(f\"3d state_dict key {k} does not exist in 2d model\")\n\n state_dict_3d.update(state_dict)\n self.load_state_dict(state_dict_3d, **kwargs)" }, { "identifier": "SpatioTemporalStableDiffusionPipeline", "path": "video_diffusion/pipelines/stable_diffusion.py", "snippet": "class SpatioTemporalStableDiffusionPipeline(DiffusionPipeline):\n r\"\"\"\n Pipeline for text-to-video generation using Spatio-Temporal Stable Diffusion.\n \"\"\"\n _optional_components = []\n\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNetPseudo3DConditionModel,\n scheduler: Union[\n DDIMScheduler,\n PNDMScheduler,\n LMSDiscreteScheduler,\n EulerDiscreteScheduler,\n EulerAncestralDiscreteScheduler,\n DPMSolverMultistepScheduler,\n ],\n ):\n super().__init__()\n\n if hasattr(scheduler.config, \"steps_offset\") and scheduler.config.steps_offset != 1:\n deprecation_message = (\n f\"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`\"\n f\" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure \"\n \"to update the config accordingly as leaving `steps_offset` might led to incorrect results\"\n \" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,\"\n \" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`\"\n \" file\"\n )\n deprecate(\"steps_offset!=1\", \"1.0.0\", deprecation_message, standard_warn=False)\n new_config = dict(scheduler.config)\n new_config[\"steps_offset\"] = 1\n scheduler._internal_dict = FrozenDict(new_config)\n\n if hasattr(scheduler.config, \"clip_sample\") and scheduler.config.clip_sample is True:\n deprecation_message = (\n f\"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`.\"\n \" `clip_sample` should be set to False in the configuration file. Please make sure to update the\"\n \" config accordingly as not setting `clip_sample` in the config might lead to incorrect results in\"\n \" future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very\"\n \" nice if you could open a Pull request for the `scheduler/scheduler_config.json` file\"\n )\n deprecate(\"clip_sample not set\", \"1.0.0\", deprecation_message, standard_warn=False)\n new_config = dict(scheduler.config)\n new_config[\"clip_sample\"] = False\n scheduler._internal_dict = FrozenDict(new_config)\n\n is_unet_version_less_0_9_0 = hasattr(unet.config, \"_diffusers_version\") and version.parse(\n version.parse(unet.config._diffusers_version).base_version\n ) < version.parse(\"0.9.0.dev0\")\n is_unet_sample_size_less_64 = (\n hasattr(unet.config, \"sample_size\") and unet.config.sample_size < 64\n )\n if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:\n deprecation_message = (\n \"The configuration file of the unet has set the default `sample_size` to smaller than\"\n \" 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the\"\n \" following: \\n- CompVis/stable-diffusion-v1-4 \\n- CompVis/stable-diffusion-v1-3 \\n-\"\n \" CompVis/stable-diffusion-v1-2 \\n- CompVis/stable-diffusion-v1-1 \\n- runwayml/stable-diffusion-v1-5\"\n \" \\n- runwayml/stable-diffusion-inpainting \\n you should change 'sample_size' to 64 in the\"\n \" configuration file. 
Please make sure to update the config accordingly as leaving `sample_size=32`\"\n \" in the config might lead to incorrect results in future versions. If you have downloaded this\"\n \" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for\"\n \" the `unet/config.json` file\"\n )\n deprecate(\"sample_size<64\", \"1.0.0\", deprecation_message, standard_warn=False)\n new_config = dict(unet.config)\n new_config[\"sample_size\"] = 64\n unet._internal_dict = FrozenDict(new_config)\n\n self.register_modules(\n vae=vae,\n text_encoder=text_encoder,\n tokenizer=tokenizer,\n unet=unet,\n scheduler=scheduler,\n )\n self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)\n def prepare_before_train_loop(self, params_to_optimize=None):\n # Set xformers in train.py\n \n # self.disable_xformers_memory_efficient_attention()\n\n self.vae.requires_grad_(False)\n self.unet.requires_grad_(False)\n self.text_encoder.requires_grad_(False)\n\n self.vae.eval()\n self.unet.eval()\n self.text_encoder.eval()\n \n if params_to_optimize is not None:\n params_to_optimize.requires_grad = True\n def enable_vae_slicing(self):\n r\"\"\"\n Enable sliced VAE decoding.\n\n When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several\n steps. This is useful to save some memory and allow larger batch sizes.\n \"\"\"\n self.vae.enable_slicing()\n\n def disable_vae_slicing(self):\n r\"\"\"\n Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to\n computing decoding in one step.\n \"\"\"\n self.vae.disable_slicing()\n\n def enable_sequential_cpu_offload(self, gpu_id=0):\n r\"\"\"\n Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,\n text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a\n `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called.\n \"\"\"\n if is_accelerate_available():\n from accelerate import cpu_offload\n else:\n raise ImportError(\"Please install accelerate via `pip install accelerate`\")\n\n device = torch.device(f\"cuda:{gpu_id}\")\n\n for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:\n if cpu_offloaded_model is not None:\n cpu_offload(cpu_offloaded_model, device)\n\n @property\n def _execution_device(self):\n r\"\"\"\n Returns the device on which the pipeline's models will be executed. 
After calling\n `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module\n hooks.\n \"\"\"\n if self.device != torch.device(\"meta\") or not hasattr(self.unet, \"_hf_hook\"):\n return self.device\n for module in self.unet.modules():\n if (\n hasattr(module, \"_hf_hook\")\n and hasattr(module._hf_hook, \"execution_device\")\n and module._hf_hook.execution_device is not None\n ):\n return torch.device(module._hf_hook.execution_device)\n return self.device\n\n def _encode_prompt(\n self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt\n ):\n r\"\"\"\n Encodes the prompt into text encoder hidden states.\n\n Args:\n prompt (`str` or `list(int)`):\n prompt to be encoded\n device: (`torch.device`):\n torch device\n num_images_per_prompt (`int`):\n number of images that should be generated per prompt\n do_classifier_free_guidance (`bool`):\n whether to use classifier free guidance or not\n negative_prompt (`str` or `List[str]`):\n The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored\n if `guidance_scale` is less than `1`).\n \"\"\"\n batch_size = len(prompt) if isinstance(prompt, list) else 1\n\n text_inputs = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n text_input_ids = text_inputs.input_ids\n untruncated_ids = self.tokenizer(prompt, padding=\"longest\", return_tensors=\"pt\").input_ids\n\n if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(\n text_input_ids, untruncated_ids\n ):\n removed_text = self.tokenizer.batch_decode(\n untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]\n )\n logger.warning(\n \"The following part of your input was truncated because CLIP can only handle sequences up to\"\n f\" {self.tokenizer.model_max_length} tokens: {removed_text}\"\n )\n\n if (\n hasattr(self.text_encoder.config, \"use_attention_mask\")\n and self.text_encoder.config.use_attention_mask\n ):\n attention_mask = text_inputs.attention_mask.to(device)\n else:\n attention_mask = None\n\n text_embeddings = self.text_encoder(\n text_input_ids.to(device),\n attention_mask=attention_mask,\n )\n text_embeddings = text_embeddings[0]\n\n # duplicate text embeddings for each generation per prompt, using mps friendly method\n bs_embed, seq_len, _ = text_embeddings.shape\n text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)\n text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)\n\n # get unconditional embeddings for classifier free guidance\n if do_classifier_free_guidance:\n uncond_tokens: List[str]\n if negative_prompt is None:\n uncond_tokens = [\"\"] * batch_size\n elif type(prompt) is not type(negative_prompt):\n raise TypeError(\n f\"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=\"\n f\" {type(prompt)}.\"\n )\n elif isinstance(negative_prompt, str):\n uncond_tokens = [negative_prompt]\n elif batch_size != len(negative_prompt):\n raise ValueError(\n f\"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:\"\n f\" {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches\"\n \" the batch size of `prompt`.\"\n )\n else:\n uncond_tokens = negative_prompt\n\n max_length = text_input_ids.shape[-1]\n uncond_input = self.tokenizer(\n uncond_tokens,\n padding=\"max_length\",\n max_length=max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n\n if (\n hasattr(self.text_encoder.config, \"use_attention_mask\")\n and self.text_encoder.config.use_attention_mask\n ):\n attention_mask = uncond_input.attention_mask.to(device)\n else:\n attention_mask = None\n\n uncond_embeddings = self.text_encoder(\n uncond_input.input_ids.to(device),\n attention_mask=attention_mask,\n )\n uncond_embeddings = uncond_embeddings[0]\n\n # duplicate unconditional embeddings for each generation per prompt, using mps friendly method\n seq_len = uncond_embeddings.shape[1]\n uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)\n uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)\n\n # For classifier free guidance, we need to do two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n text_embeddings = torch.cat([uncond_embeddings, text_embeddings])\n\n return text_embeddings\n\n def decode_latents(self, latents):\n is_video = (latents.dim() == 5)\n b = latents.shape[0]\n latents = 1 / 0.18215 * latents\n \n if is_video:\n latents = rearrange(latents, \"b c f h w -> (b f) c h w\") # torch.Size([70, 4, 64, 64])\n\n latents_split = torch.split(latents, 16, dim=0)\n image = torch.cat([self.vae.decode(l).sample for l in latents_split], dim=0)\n \n # image_full = self.vae.decode(latents).sample\n # RuntimeError: upsample_nearest_nhwc only supports output tensors with less than INT_MAX elements\n # Pytorch upsample alogrithm not work for batch size 32 -> 64 \n image = (image / 2 + 0.5).clamp(0, 1)\n # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16\n\n image = image.cpu().float().numpy()\n if is_video:\n image = rearrange(image, \"(b f) c h w -> b f h w c\", b=b)\n else:\n image = rearrange(image, \"b c h w -> b h w c\", b=b)\n return image\n\n def prepare_extra_step_kwargs(self, generator, eta):\n # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature\n # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.\n # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502\n # and should be between [0, 1]\n\n accepts_eta = \"eta\" in set(inspect.signature(self.scheduler.step).parameters.keys())\n extra_step_kwargs = {}\n if accepts_eta:\n extra_step_kwargs[\"eta\"] = eta\n\n # check if the scheduler accepts generator\n accepts_generator = \"generator\" in set(inspect.signature(self.scheduler.step).parameters.keys())\n if accepts_generator:\n extra_step_kwargs[\"generator\"] = generator\n return extra_step_kwargs\n\n def check_inputs(self, prompt, height, width, callback_steps):\n if not isinstance(prompt, str) and not isinstance(prompt, list):\n raise ValueError(f\"`prompt` has to be of type `str` or `list` but is {type(prompt)}\")\n\n if height % 8 != 0 or width % 8 != 0:\n raise ValueError(\n f\"`height` and `width` have to be divisible by 8 but are {height} and {width}.\"\n )\n\n if (callback_steps is None) or (\n callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)\n ):\n raise ValueError(\n f\"`callback_steps` has to be a 
positive integer but is {callback_steps} of type\"\n f\" {type(callback_steps)}.\"\n )\n\n def prepare_latents(\n self,\n batch_size,\n num_channels_latents,\n clip_length,\n height,\n width,\n dtype,\n device,\n generator,\n latents=None,\n ):\n shape = (\n batch_size,\n num_channels_latents,\n clip_length,\n height // self.vae_scale_factor,\n width // self.vae_scale_factor,\n )\n if isinstance(generator, list) and len(generator) != batch_size:\n raise ValueError(\n f\"You have passed a list of generators of length {len(generator)}, but requested an effective batch\"\n f\" size of {batch_size}. Make sure the batch size matches the length of the generators.\"\n )\n\n if latents is None:\n rand_device = \"cpu\" if device.type == \"mps\" else device\n\n if isinstance(generator, list):\n shape = (1,) + shape[1:]\n latents = [\n torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype)\n for i in range(batch_size)\n ]\n latents = torch.cat(latents, dim=0).to(device)\n else:\n latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(\n device\n )\n else:\n if latents.shape != shape:\n raise ValueError(f\"Unexpected latents shape, got {latents.shape}, expected {shape}\")\n latents = latents.to(device)\n\n # scale the initial noise by the standard deviation required by the scheduler\n latents = latents * self.scheduler.init_noise_sigma\n return latents\n\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]],\n height: Optional[int] = None,\n width: Optional[int] = None,\n num_inference_steps: int = 50,\n clip_length: int = 8,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n num_images_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: Optional[int] = 1,\n ):\n r\"\"\"\n Function invoked when calling the pipeline for generation.\n\n Args:\n prompt (`str` or `List[str]`):\n The prompt or prompts to guide the image generation.\n height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):\n The height in pixels of the generated image.\n width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):\n The width in pixels of the generated image.\n num_inference_steps (`int`, *optional*, defaults to 50):\n The number of denoising steps. More denoising steps usually lead to a higher quality image at the\n expense of slower inference.\n guidance_scale (`float`, *optional*, defaults to 7.5):\n Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).\n `guidance_scale` is defined as `w` of equation 2. of [Imagen\n Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >\n 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,\n usually at the expense of lower image quality.\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts not to guide the image generation. 
Ignored when not using guidance (i.e., ignored\n if `guidance_scale` is less than `1`).\n num_images_per_prompt (`int`, *optional*, defaults to 1):\n The number of images to generate per prompt.\n eta (`float`, *optional*, defaults to 0.0):\n Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to\n [`schedulers.DDIMScheduler`], will be ignored for others.\n generator (`torch.Generator`, *optional*):\n One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)\n to make generation deterministic.\n latents (`torch.FloatTensor`, *optional*):\n Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image\n generation. Can be used to tweak the same generation with different prompts. If not provided, a latents\n tensor will ge generated by sampling using the supplied random `generator`.\n output_type (`str`, *optional*, defaults to `\"pil\"`):\n The output format of the generate image. Choose between\n [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a\n plain tuple.\n callback (`Callable`, *optional*):\n A function that will be called every `callback_steps` steps during inference. The function will be\n called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.\n callback_steps (`int`, *optional*, defaults to 1):\n The frequency at which the `callback` function will be called. If not specified, the callback will be\n called at every step.\n\n Returns:\n [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:\n [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.\n When returning a tuple, the first element is a list with the generated images, and the second element is a\n list of `bool`s denoting whether the corresponding generated image likely represents \"not-safe-for-work\"\n (nsfw) content, according to the `safety_checker`.\n \"\"\"\n # 0. Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n # 1. Check inputs. Raise error if not correct\n self.check_inputs(prompt, height, width, callback_steps)\n\n # 2. Define call parameters\n batch_size = 1 if isinstance(prompt, str) else len(prompt)\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n\n # 3. Encode input prompt\n text_embeddings = self._encode_prompt(\n prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt\n )\n\n # 4. Prepare timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps = self.scheduler.timesteps\n\n # 5. Prepare latent variables\n num_channels_latents = self.unet.in_channels\n # [1, 4, 8, 64, 64]\n latents = self.prepare_latents(\n batch_size * num_images_per_prompt,\n num_channels_latents,\n clip_length,\n height,\n width,\n text_embeddings.dtype,\n device,\n generator,\n latents,\n )\n latents_dtype = latents.dtype\n\n # 6. Prepare extra step kwargs. 
TODO: Logic should ideally just be moved out of the pipeline\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # 7. Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n # predict the noise residual\n # [2, 4, 8, 64, 64]\n noise_pred = self.unet(\n latent_model_input, t, encoder_hidden_states=text_embeddings\n ).sample.to(dtype=latents_dtype)\n\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (\n noise_pred_text - noise_pred_uncond\n )\n\n # compute the previous noisy sample x_t -> x_t-1 [1, 4, 8, 64, 64]\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample\n\n # call the callback, if provided\n if i == len(timesteps) - 1 or (\n (i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0\n ):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n\n # 8. Post-processing\n image = self.decode_latents(latents)\n\n # 9. Run safety checker\n has_nsfw_concept = None\n\n # 10. Convert to PIL\n if output_type == \"pil\":\n image = self.numpy_to_pil(image)\n\n if not return_dict:\n return (image, has_nsfw_concept)\n torch.cuda.empty_cache()\n return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)\n\n @staticmethod\n def numpy_to_pil(images):\n # (1, 16, 512, 512, 3)\n pil_images = []\n is_video = (len(images.shape)==5)\n if is_video:\n for sequence in images:\n pil_images.append(DiffusionPipeline.numpy_to_pil(sequence))\n else:\n pil_images.append(DiffusionPipeline.numpy_to_pil(images))\n return pil_images\n\n def print_pipeline(self, logger):\n print('Overview function of pipeline: ')\n print(self.__class__)\n\n print(self)\n \n expected_modules, optional_parameters = self._get_signature_keys(self) \n components_details = {\n k: getattr(self, k) for k in self.config.keys() if not k.startswith(\"_\") and k not in optional_parameters\n }\n import json\n logger.info(str(components_details))\n # logger.info(str(json.dumps(components_details, indent = 4)))\n # print(str(components_details))\n # print(self._optional_components)\n \n print(f\"python version {sys.version}\")\n print(f\"torch version {torch.__version__}\")\n print(f\"validate gpu status:\")\n print( torch.tensor(1.0).cuda()*2)\n os.system(\"nvcc --version\")\n\n import diffusers\n print(diffusers.__version__)\n print(diffusers.__file__)\n\n try:\n import bitsandbytes\n print(bitsandbytes.__file__)\n except:\n print(\"fail to import bitsandbytes\")\n # os.system(\"accelerate env\")\n # os.system(\"python -m xformers.info\")" } ]
from typing import Union from einops import rearrange from transformers import CLIPTextModel, CLIPTokenizer from diffusers.models import AutoencoderKL from diffusers.schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ..models.unet_3d_condition import UNetPseudo3DConditionModel from video_diffusion.pipelines.stable_diffusion import SpatioTemporalStableDiffusionPipeline import torch import torch.nn.functional as F
11,723
class DDPMTrainer(SpatioTemporalStableDiffusionPipeline): def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer,
class DDPMTrainer(SpatioTemporalStableDiffusionPipeline): def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer,
unet: UNetPseudo3DConditionModel,
0
2023-10-09 14:38:28+00:00
16k
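The decode_latents method quoted in the record above folds the frame axis of a video latent (b, c, f, h, w) into the batch axis so a 2D VAE can decode it frame by frame in chunks of 16, then unfolds the result back into a video. The sketch below reproduces only that reshaping pattern with einops; the tensor sizes are invented and a dummy scaling stands in for the actual vae.decode(...).sample call.

import torch
from einops import rearrange

b, c, f, h, w = 1, 4, 8, 64, 64
latents = torch.randn(b, c, f, h, w)                       # placeholder video latent

frames = rearrange(latents, "b c f h w -> (b f) c h w")    # fold frames into the batch: (8, 4, 64, 64)
chunks = torch.split(frames, 16, dim=0)                    # decode at most 16 frames at a time, as in the snippet
decoded = torch.cat([chunk * 2.0 for chunk in chunks], dim=0)  # stand-in for vae.decode(chunk).sample
video = rearrange(decoded, "(b f) c h w -> b f h w c", b=b)    # unfold back into a per-frame video layout
print(video.shape)                                         # torch.Size([1, 8, 64, 64, 4])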
mlpc-ucsd/MaskCLIP
train_net.py
[ { "identifier": "add_maskformer2_config", "path": "maskclip/config.py", "snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER_NAME = \"mask_former_semantic\"\n # Color augmentation\n cfg.INPUT.COLOR_AUG_SSD = False\n # We retry random cropping until no single category in semantic segmentation GT occupies more\n # than `SINGLE_CATEGORY_MAX_AREA` part of the crop.\n cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0\n # Pad image and segmentation GT in dataset mapper.\n cfg.INPUT.SIZE_DIVISIBILITY = -1\n\n # solver config\n # weight decay on embedding\n cfg.SOLVER.WEIGHT_DECAY_EMBED = 0.0\n # optimizer\n cfg.SOLVER.OPTIMIZER = \"ADAMW\"\n cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1\n\n # mask_former model config\n cfg.MODEL.MASK_FORMER = CN()\n\n # loss\n cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION = True\n cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT = 0.1\n cfg.MODEL.MASK_FORMER.CLASS_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.DICE_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.MASK_WEIGHT = 20.0\n\n # transformer config\n cfg.MODEL.MASK_FORMER.NHEADS = 8\n cfg.MODEL.MASK_FORMER.DROPOUT = 0.1\n cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD = 2048\n cfg.MODEL.MASK_FORMER.ENC_LAYERS = 0\n cfg.MODEL.MASK_FORMER.DEC_LAYERS = 6\n cfg.MODEL.MASK_FORMER.PRE_NORM = False\n\n cfg.MODEL.MASK_FORMER.HIDDEN_DIM = 256\n cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES = 100\n\n cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE = \"res5\"\n cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ = False\n\n # mask_former inference config\n cfg.MODEL.MASK_FORMER.TEST = CN()\n cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON = True\n cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON = False\n cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON = False\n cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE = False\n\n # Sometimes `backbone.size_divisibility` is set to 0 for some backbone (e.g. 
ResNet)\n # you can use this config to override\n cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY = 32\n\n # pixel decoder config\n cfg.MODEL.SEM_SEG_HEAD.MASK_DIM = 256\n # adding transformer in pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS = 0\n # pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME = \"BasePixelDecoder\"\n\n # swin transformer backbone\n cfg.MODEL.SWIN = CN()\n cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE = 224\n cfg.MODEL.SWIN.PATCH_SIZE = 4\n cfg.MODEL.SWIN.EMBED_DIM = 96\n cfg.MODEL.SWIN.DEPTHS = [2, 2, 6, 2]\n cfg.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]\n cfg.MODEL.SWIN.WINDOW_SIZE = 7\n cfg.MODEL.SWIN.MLP_RATIO = 4.0\n cfg.MODEL.SWIN.QKV_BIAS = True\n cfg.MODEL.SWIN.QK_SCALE = None\n cfg.MODEL.SWIN.DROP_RATE = 0.0\n cfg.MODEL.SWIN.ATTN_DROP_RATE = 0.0\n cfg.MODEL.SWIN.DROP_PATH_RATE = 0.3\n cfg.MODEL.SWIN.APE = False\n cfg.MODEL.SWIN.PATCH_NORM = True\n cfg.MODEL.SWIN.OUT_FEATURES = [\"res2\", \"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SWIN.USE_CHECKPOINT = False\n\n # NOTE: maskformer2 extra configs\n # transformer module\n cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME = \"MultiScaleMaskedTransformerDecoder\"\n\n # LSJ aug\n cfg.INPUT.IMAGE_SIZE = 1024\n cfg.INPUT.MIN_SCALE = 0.1\n cfg.INPUT.MAX_SCALE = 2.0\n\n # MSDeformAttn encoder configs\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES = [\"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_POINTS = 4\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_HEADS = 8\n\n # point loss configs\n # Number of points sampled during training for a mask point head.\n cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS = 112 * 112\n # Oversampling parameter for PointRend point sampling during training. Parameter `k` in the\n # original paper.\n cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO = 3.0\n # Importance sampling parameter for PointRend point sampling during training. Parametr `beta` in\n # the original paper.\n cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO = 0.75\n\n # add MaskCLIP configs\n cfg.MODEL.CLIP_MODEL = CN()\n cfg.MODEL.CLIP_MODEL.NAME = 'ViT-L/14@336px'\n cfg.MODEL.CLIP_MODEL.INPUT_RESOLUTION = 336\n cfg.MODEL.CLIP_MODEL.PATCH_SIZE = 14\n cfg.MODEL.CLIP_MODEL.WIDTH = 1024\n cfg.MODEL.CLIP_MODEL.LAYERS = 24\n cfg.MODEL.CLIP_MODEL.HEADS = 16\n cfg.MODEL.CLIP_MODEL.OUTPUT_DIM = 768\n\n cfg.MODEL.CLIP_MODEL.TEMPERATURE = 0.01" }, { "identifier": "COCOInstanceNewBaselineDatasetMapper", "path": "maskclip/data/dataset_mappers/coco_instance_new_baseline_dataset_mapper.py", "snippet": "class COCOInstanceNewBaselineDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer.\n\n This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. 
Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n tfm_gens,\n image_format,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n tfm_gens: data augmentation\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n self.tfm_gens = tfm_gens\n logging.getLogger(__name__).info(\n \"[COCOInstanceNewBaselineDatasetMapper] Full TransformGens used in training: {}\".format(str(self.tfm_gens))\n )\n\n self.img_format = image_format\n self.is_train = is_train\n \n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n tfm_gens = build_transform_gen(cfg, is_train)\n\n ret = {\n \"is_train\": is_train,\n \"tfm_gens\": tfm_gens,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n # TODO: get padding mask\n # by feeding a \"segmentation mask\" to the same transforms\n padding_mask = np.ones(image.shape[:2])\n\n image, transforms = T.apply_transform_gens(self.tfm_gens, image)\n # the crop transformation has default padding value 0 for segmentation\n padding_mask = transforms.apply_segmentation(padding_mask)\n padding_mask = ~ padding_mask.astype(bool)\n\n image_shape = image.shape[:2] # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n dataset_dict[\"padding_mask\"] = torch.as_tensor(np.ascontiguousarray(padding_mask))\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n return dataset_dict\n\n if \"annotations\" in dataset_dict:\n # USER: Modify this if you want to keep them for some reason.\n for anno in dataset_dict[\"annotations\"]:\n # Let's always keep mask\n # if not self.mask_on:\n # anno.pop(\"segmentation\", None)\n anno.pop(\"keypoints\", None)\n\n # USER: Implement additional transformations if you have other types of data\n annos = [\n utils.transform_instance_annotations(obj, transforms, image_shape)\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n # NOTE: does not support BitMask due to augmentation\n # Current BitMask cannot handle empty objects\n instances = utils.annotations_to_instances(annos, image_shape)\n # After transforms such as cropping are applied, the bounding box may no longer\n # tightly bound the object. As an example, imagine a triangle object\n # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). 
The tight\n # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to\n # the intersection of original bounding box and the cropping box.\n instances.gt_boxes = instances.gt_masks.get_bounding_boxes()\n # Need to filter empty instances first (due to augmentation)\n instances = utils.filter_empty_instances(instances)\n # Generate masks from polygon\n h, w = instances.image_size\n # image_size_xyxy = torch.as_tensor([w, h, w, h], dtype=torch.float)\n if hasattr(instances, 'gt_masks'):\n gt_masks = instances.gt_masks\n gt_masks = convert_coco_poly_to_mask(gt_masks.polygons, h, w)\n instances.gt_masks = gt_masks\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "COCOPanopticNewBaselineDatasetMapper", "path": "maskclip/data/dataset_mappers/coco_panoptic_new_baseline_dataset_mapper.py", "snippet": "class COCOPanopticNewBaselineDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer.\n\n This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n tfm_gens,\n image_format,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n crop_gen: crop augmentation\n tfm_gens: data augmentation\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n self.tfm_gens = tfm_gens\n logging.getLogger(__name__).info(\n \"[COCOPanopticNewBaselineDatasetMapper] Full TransformGens used in training: {}\".format(\n str(self.tfm_gens)\n )\n )\n\n self.img_format = image_format\n self.is_train = is_train\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n tfm_gens = build_transform_gen(cfg, is_train)\n\n ret = {\n \"is_train\": is_train,\n \"tfm_gens\": tfm_gens,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n image, transforms = T.apply_transform_gens(self.tfm_gens, image)\n image_shape = image.shape[:2] # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n return dataset_dict\n\n if \"pan_seg_file_name\" in dataset_dict:\n pan_seg_gt = utils.read_image(dataset_dict.pop(\"pan_seg_file_name\"), \"RGB\")\n segments_info = dataset_dict[\"segments_info\"]\n\n # apply the same transformation to panoptic 
segmentation\n pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)\n\n from panopticapi.utils import rgb2id\n\n pan_seg_gt = rgb2id(pan_seg_gt)\n\n instances = Instances(image_shape)\n classes = []\n masks = []\n for segment_info in segments_info:\n class_id = segment_info[\"category_id\"]\n if not segment_info[\"iscrowd\"]:\n classes.append(class_id)\n masks.append(pan_seg_gt == segment_info[\"id\"])\n\n classes = np.array(classes)\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))\n instances.gt_boxes = Boxes(torch.zeros((0, 4)))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n instances.gt_boxes = masks.get_bounding_boxes()\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerInstanceDatasetMapper", "path": "maskclip/data/dataset_mappers/mask_former_instance_dataset_mapper.py", "snippet": "class MaskFormerInstanceDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for instance segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens = augmentations\n self.img_format = image_format\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n\n # # Build augmentation\n # augs = [\n # T.ResizeShortestEdge(\n # cfg.INPUT.MIN_SIZE_TRAIN,\n # cfg.INPUT.MAX_SIZE_TRAIN,\n # cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,\n # )\n # ]\n # if cfg.INPUT.CROP.ENABLED:\n # augs.append(\n # T.RandomCrop(\n # cfg.INPUT.CROP.TYPE,\n # cfg.INPUT.CROP.SIZE,\n # )\n # )\n # if cfg.INPUT.COLOR_AUG_SSD:\n # augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n # augs.append(T.RandomFlip())\n\n augs = [\n T.Resize((1024, 1024))\n ]\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerPanopticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n 
utils.check_image_size(dataset_dict, image)\n\n aug_input = T.AugInput(image)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n\n # transform instnace masks\n assert \"annotations\" in dataset_dict\n for anno in dataset_dict[\"annotations\"]:\n anno.pop(\"keypoints\", None)\n\n annos = [\n utils.transform_instance_annotations(obj, transforms, image.shape[:2])\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n\n if len(annos):\n assert \"segmentation\" in annos[0]\n segms = [obj[\"segmentation\"] for obj in annos]\n masks = []\n for segm in segms:\n if isinstance(segm, list):\n # polygon\n masks.append(polygons_to_bitmask(segm, *image.shape[:2]))\n elif isinstance(segm, dict):\n # COCO RLE\n masks.append(mask_util.decode(segm))\n elif isinstance(segm, np.ndarray):\n assert segm.ndim == 2, \"Expect segmentation of 2 dimensions, got {}.\".format(\n segm.ndim\n )\n # mask array\n masks.append(segm)\n else:\n raise ValueError(\n \"Cannot convert segmentation of type '{}' to BitMasks!\"\n \"Supported types are: polygons as list[list[float] or ndarray],\"\n \" COCO-style RLE as a dict, or a binary segmentation mask \"\n \" in a 2D numpy array of shape HxW.\".format(type(segm))\n )\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n masks = [torch.from_numpy(np.ascontiguousarray(x)) for x in masks]\n\n classes = [int(obj[\"category_id\"]) for obj in annos]\n classes = torch.tensor(classes, dtype=torch.int64)\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n # pad image\n image = F.pad(image, padding_size, value=128).contiguous()\n # pad mask\n masks = [F.pad(x, padding_size, value=0).contiguous() for x in masks]\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n\n # Prepare per-category binary masks\n instances = Instances(image_shape)\n instances.gt_classes = classes\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, image.shape[-2], image.shape[-1]))\n else:\n masks = BitMasks(torch.stack(masks))\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerPanopticDatasetMapper", "path": "maskclip/data/dataset_mappers/mask_former_panoptic_dataset_mapper.py", "snippet": "class MaskFormerPanopticDatasetMapper(MaskFormerSemanticDatasetMapper):\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. 
Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n super().__init__(\n is_train,\n augmentations=augmentations,\n image_format=image_format,\n ignore_label=ignore_label,\n size_divisibility=size_divisibility,\n )\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerPanopticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n # semantic segmentation\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\"double\")\n else:\n sem_seg_gt = None\n\n # panoptic segmentation\n if \"pan_seg_file_name\" in dataset_dict:\n pan_seg_gt = utils.read_image(dataset_dict.pop(\"pan_seg_file_name\"), \"RGB\")\n segments_info = dataset_dict[\"segments_info\"]\n else:\n pan_seg_gt = None\n segments_info = None\n\n if pan_seg_gt is None:\n raise ValueError(\n \"Cannot find 'pan_seg_file_name' for panoptic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n if sem_seg_gt is not None:\n sem_seg_gt = aug_input.sem_seg\n\n # apply the same transformation to panoptic segmentation\n pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)\n\n from panopticapi.utils import rgb2id\n\n pan_seg_gt = rgb2id(pan_seg_gt)\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n pan_seg_gt = torch.as_tensor(pan_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous()\n pan_seg_gt = F.pad(\n pan_seg_gt, padding_size, value=0\n ).contiguous() # 0 is the VOID panoptic label\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n 
raise ValueError(\"Pemantic segmentation dataset should not have 'annotations'.\")\n\n # Prepare per-category binary masks\n pan_seg_gt = pan_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = []\n masks = []\n for segment_info in segments_info:\n class_id = segment_info[\"category_id\"]\n if not segment_info[\"iscrowd\"]:\n classes.append(class_id)\n masks.append(pan_seg_gt == segment_info[\"id\"])\n\n classes = np.array(classes)\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerSemanticDatasetMapper", "path": "maskclip/data/dataset_mappers/mask_former_semantic_dataset_mapper.py", "snippet": "class MaskFormerSemanticDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for semantic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens = augmentations\n self.img_format = image_format\n self.ignore_label = ignore_label\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n\n # Build augmentation\n # augs = [\n # T.ResizeShortestEdge(\n # cfg.INPUT.MIN_SIZE_TRAIN,\n # cfg.INPUT.MAX_SIZE_TRAIN,\n # cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,\n # )\n # ]\n # if cfg.INPUT.CROP.ENABLED:\n # augs.append(\n # T.RandomCrop_CategoryAreaConstraint(\n # cfg.INPUT.CROP.TYPE,\n # cfg.INPUT.CROP.SIZE,\n # cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA,\n # cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,\n # )\n # )\n # if cfg.INPUT.COLOR_AUG_SSD:\n # augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n # augs.append(T.RandomFlip())\n\n augs = [\n T.Resize((1024, 1024))\n ]\n\n # Assume always applies to the training set.\n dataset_names = cfg.DATASETS.TRAIN\n meta = MetadataCatalog.get(dataset_names[0])\n ignore_label = meta.ignore_label\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"ignore_label\": ignore_label,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n 
dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerSemanticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\"double\")\n else:\n sem_seg_gt = None\n\n if sem_seg_gt is None:\n raise ValueError(\n \"Cannot find 'sem_seg_file_name' for semantic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n sem_seg_gt = aug_input.sem_seg\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous()\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n raise ValueError(\"Semantic segmentation dataset should not have 'annotations'.\")\n\n # Prepare per-category binary masks\n if sem_seg_gt is not None:\n sem_seg_gt = sem_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = np.unique(sem_seg_gt)\n # remove ignored region\n classes = classes[classes != self.ignore_label]\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n\n masks = []\n for class_id in classes:\n masks.append(sem_seg_gt == class_id)\n\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, sem_seg_gt.shape[-2], sem_seg_gt.shape[-1]))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "SemanticSegmentorWithTTA", "path": "maskclip/test_time_augmentation.py", "snippet": "class SemanticSegmentorWithTTA(nn.Module):\n \"\"\"\n A SemanticSegmentor with test-time augmentation enabled.\n Its :meth:`__call__` method has the same interface as :meth:`SemanticSegmentor.forward`.\n \"\"\"\n\n def __init__(self, cfg, model, tta_mapper=None, batch_size=1):\n \"\"\"\n Args:\n cfg (CfgNode):\n model (SemanticSegmentor): a SemanticSegmentor to apply TTA on.\n tta_mapper (callable): takes a dataset dict and returns a list of\n augmented versions of the dataset dict. 
Defaults to\n `DatasetMapperTTA(cfg)`.\n batch_size (int): batch the augmented images into this batch size for inference.\n \"\"\"\n super().__init__()\n if isinstance(model, DistributedDataParallel):\n model = model.module\n self.cfg = cfg.clone()\n\n self.model = model\n\n if tta_mapper is None:\n tta_mapper = DatasetMapperTTA(cfg)\n self.tta_mapper = tta_mapper\n self.batch_size = batch_size\n\n def __call__(self, batched_inputs):\n \"\"\"\n Same input/output format as :meth:`SemanticSegmentor.forward`\n \"\"\"\n\n def _maybe_read_image(dataset_dict):\n ret = copy.copy(dataset_dict)\n if \"image\" not in ret:\n image = read_image(ret.pop(\"file_name\"), self.model.input_format)\n image = torch.from_numpy(np.ascontiguousarray(image.transpose(2, 0, 1))) # CHW\n ret[\"image\"] = image\n if \"height\" not in ret and \"width\" not in ret:\n ret[\"height\"] = image.shape[1]\n ret[\"width\"] = image.shape[2]\n return ret\n\n processed_results = []\n for x in batched_inputs:\n result = self._inference_one_image(_maybe_read_image(x))\n processed_results.append(result)\n return processed_results\n\n def _inference_one_image(self, input):\n \"\"\"\n Args:\n input (dict): one dataset dict with \"image\" field being a CHW tensor\n Returns:\n dict: one output dict\n \"\"\"\n orig_shape = (input[\"height\"], input[\"width\"])\n augmented_inputs, tfms = self._get_augmented_inputs(input)\n\n final_predictions = None\n count_predictions = 0\n for input, tfm in zip(augmented_inputs, tfms):\n count_predictions += 1\n with torch.no_grad():\n if final_predictions is None:\n if any(isinstance(t, HFlipTransform) for t in tfm.transforms):\n final_predictions = self.model([input])[0].pop(\"sem_seg\").flip(dims=[2])\n else:\n final_predictions = self.model([input])[0].pop(\"sem_seg\")\n else:\n if any(isinstance(t, HFlipTransform) for t in tfm.transforms):\n final_predictions += self.model([input])[0].pop(\"sem_seg\").flip(dims=[2])\n else:\n final_predictions += self.model([input])[0].pop(\"sem_seg\")\n\n final_predictions = final_predictions / count_predictions\n return {\"sem_seg\": final_predictions}\n\n def _get_augmented_inputs(self, input):\n augmented_inputs = self.tta_mapper(input)\n tfms = [x.pop(\"transforms\") for x in augmented_inputs]\n return augmented_inputs, tfms" }, { "identifier": "InstanceSegEvaluator", "path": "maskclip/evaluation/instance_evaluation.py", "snippet": "class InstanceSegEvaluator(COCOEvaluator):\n \"\"\"\n Evaluate AR for object proposals, AP for instance detection/segmentation, AP\n for keypoint detection outputs using COCO's metrics.\n See http://cocodataset.org/#detection-eval and\n http://cocodataset.org/#keypoints-eval to understand its metrics.\n The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means\n the metric cannot be computed (e.g. due to no predictions made).\n\n In addition to COCO, this evaluator is able to support any bounding box detection,\n instance segmentation, or keypoint detection dataset.\n \"\"\"\n\n def _eval_predictions(self, predictions, img_ids=None):\n \"\"\"\n Evaluate predictions. 
Fill self._results with the metrics of the tasks.\n \"\"\"\n self._logger.info(\"Preparing results for COCO format ...\")\n coco_results = list(itertools.chain(*[x[\"instances\"] for x in predictions]))\n tasks = self._tasks or self._tasks_from_predictions(coco_results)\n\n # unmap the category ids for COCO\n if hasattr(self._metadata, \"thing_dataset_id_to_contiguous_id\"):\n dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id\n # all_contiguous_ids = list(dataset_id_to_contiguous_id.values())\n # num_classes = len(all_contiguous_ids)\n # assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1\n\n reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()}\n for result in coco_results:\n category_id = result[\"category_id\"]\n # assert category_id < num_classes, (\n # f\"A prediction has class={category_id}, \"\n # f\"but the dataset only has {num_classes} classes and \"\n # f\"predicted class id should be in [0, {num_classes - 1}].\"\n # )\n assert category_id in reverse_id_mapping, (\n f\"A prediction has class={category_id}, \"\n f\"but the dataset only has class ids in {dataset_id_to_contiguous_id}.\"\n )\n result[\"category_id\"] = reverse_id_mapping[category_id]\n\n if self._output_dir:\n file_path = os.path.join(self._output_dir, \"coco_instances_results.json\")\n self._logger.info(\"Saving results to {}\".format(file_path))\n with PathManager.open(file_path, \"w\") as f:\n f.write(json.dumps(coco_results))\n f.flush()\n\n if not self._do_evaluation:\n self._logger.info(\"Annotations are not available for evaluation.\")\n return\n\n self._logger.info(\n \"Evaluating predictions with {} COCO API...\".format(\n \"unofficial\" if self._use_fast_impl else \"official\"\n )\n )\n for task in sorted(tasks):\n assert task in {\"bbox\", \"segm\", \"keypoints\"}, f\"Got unknown task: {task}!\"\n coco_eval = (\n _evaluate_predictions_on_coco(\n self._coco_api,\n coco_results,\n task,\n kpt_oks_sigmas=self._kpt_oks_sigmas,\n use_fast_impl=self._use_fast_impl,\n img_ids=img_ids,\n max_dets_per_image=self._max_dets_per_image,\n )\n if len(coco_results) > 0\n else None # cocoapi does not handle empty results very well\n )\n\n res = self._derive_coco_results(\n coco_eval, task, class_names=self._metadata.get(\"thing_classes\")\n )\n self._results[task] = res" } ]
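The MaskFormerInstanceDatasetMapper snippet in the context above converts polygon or RLE segmentations into per-instance binary masks, stacks them into BitMasks, and attaches them to an Instances object. Below is a minimal, self-contained sketch of that conversion using only detectron2 structures the snippet itself relies on; the helper name build_instances and the toy polygon are illustrative assumptions, not part of the repository.

# Sketch of the mask-preparation step described in MaskFormerInstanceDatasetMapper.
import numpy as np
import torch
from detectron2.structures import BitMasks, Instances, polygons_to_bitmask

def build_instances(annos, image_size):
    """annos: list of dicts with "segmentation" (COCO polygon list) and "category_id"."""
    h, w = image_size
    masks = [polygons_to_bitmask(obj["segmentation"], h, w) for obj in annos]
    instances = Instances(image_size)
    instances.gt_classes = torch.tensor(
        [int(obj["category_id"]) for obj in annos], dtype=torch.int64
    )
    if len(masks) == 0:
        # Some image may have no usable annotation (all ignored)
        instances.gt_masks = torch.zeros((0, h, w))
    else:
        bitmasks = BitMasks(
            torch.stack([torch.from_numpy(np.ascontiguousarray(m)) for m in masks])
        )
        instances.gt_masks = bitmasks.tensor
    return instances

# toy usage: one square object in a 4x4 image (values are illustrative)
toy_annos = [{"segmentation": [[0.0, 0.0, 2.0, 0.0, 2.0, 2.0, 0.0, 2.0]], "category_id": 1}]
print(build_instances(toy_annos, (4, 4)).gt_masks.shape)  # torch.Size([1, 4, 4])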
from shapely.errors import ShapelyDeprecationWarning from collections import OrderedDict from typing import Any, Dict, List, Set from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.data import MetadataCatalog, build_detection_train_loader from detectron2.engine import ( DefaultTrainer, default_argument_parser, default_setup, launch, ) from detectron2.evaluation import ( CityscapesInstanceEvaluator, CityscapesSemSegEvaluator, COCOEvaluator, COCOPanopticEvaluator, DatasetEvaluators, LVISEvaluator, SemSegEvaluator, verify_results, ) from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler from detectron2.solver.build import maybe_add_gradient_clipping from detectron2.utils.logger import setup_logger from maskclip import ( COCOInstanceNewBaselineDatasetMapper, COCOPanopticNewBaselineDatasetMapper, InstanceSegEvaluator, MaskFormerInstanceDatasetMapper, MaskFormerPanopticDatasetMapper, MaskFormerSemanticDatasetMapper, SemanticSegmentorWithTTA, add_maskformer2_config, ) import warnings import copy import itertools import logging import os import torch import detectron2.utils.comm as comm
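The import block above pulls in the config helpers (get_cfg, add_deeplab_config, add_maskformer2_config) together with default_setup and setup_logger, but the sample does not show how they are wired together. The sketch below follows the usual detectron2 convention for a setup() helper and relies on the imports listed above; the argument handling and logger name are assumptions, not a transcription of this repository's code.

def setup(args):
    # Assumed wiring of the imported config helpers (standard detectron2 pattern).
    cfg = get_cfg()
    add_deeplab_config(cfg)       # DeepLab defaults (e.g. poly LR scheduler keys)
    add_maskformer2_config(cfg)   # MaskFormer/Mask2Former-specific keys
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    default_setup(cfg, args)
    # dedicated project logger; the name is an assumption
    setup_logger(output=cfg.OUTPUT_DIR, distributed_rank=comm.get_rank(), name="maskclip")
    return cfg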
11525
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ MaskFormer Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning) except: pass class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to MaskFormer. """ @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]: evaluator_list.append( SemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, ) ) # instance segmentation if evaluator_type == "coco": evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) # panoptic segmentation if evaluator_type in [ "coco_panoptic_seg", "ade20k_panoptic_seg", "cityscapes_panoptic_seg", "mapillary_vistas_panoptic_seg", ]: if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON: evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) # COCO if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Mapillary Vistas if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Cityscapes if evaluator_type == "cityscapes_instance": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesInstanceEvaluator(dataset_name) if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesSemSegEvaluator(dataset_name) if evaluator_type == "cityscapes_panoptic_seg": if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." evaluator_list.append(CityscapesSemSegEvaluator(dataset_name)) if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." 
evaluator_list.append(CityscapesInstanceEvaluator(dataset_name)) # ADE20K if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) # LVIS if evaluator_type == "lvis": return LVISEvaluator(dataset_name, output_dir=output_folder) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic": mapper = MaskFormerSemanticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Panoptic segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic": mapper = MaskFormerPanopticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Instance segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance":
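The cropped code above stops inside build_train_loader, immediately after the check for the "mask_former_instance" mapper name; the record's next_line field further down gives the expected continuation. Based on that field and on the pattern of the two preceding branches, the branch plausibly completes as sketched below. The final fallback to the default loader is an assumption, since the remainder of the function is not included in this sample.

            mapper = MaskFormerInstanceDatasetMapper(cfg, True)
            return build_detection_train_loader(cfg, mapper=mapper)
        # remaining branches are not part of this sample; a default fallback is assumed
        else:
            mapper = None
            return build_detection_train_loader(cfg, mapper=mapper)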
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ MaskFormer Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning) except: pass class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to MaskFormer. """ @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]: evaluator_list.append( SemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, ) ) # instance segmentation if evaluator_type == "coco": evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) # panoptic segmentation if evaluator_type in [ "coco_panoptic_seg", "ade20k_panoptic_seg", "cityscapes_panoptic_seg", "mapillary_vistas_panoptic_seg", ]: if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON: evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) # COCO if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Mapillary Vistas if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Cityscapes if evaluator_type == "cityscapes_instance": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesInstanceEvaluator(dataset_name) if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesSemSegEvaluator(dataset_name) if evaluator_type == "cityscapes_panoptic_seg": if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." evaluator_list.append(CityscapesSemSegEvaluator(dataset_name)) if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." 
evaluator_list.append(CityscapesInstanceEvaluator(dataset_name)) # ADE20K if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) # LVIS if evaluator_type == "lvis": return LVISEvaluator(dataset_name, output_dir=output_folder) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic": mapper = MaskFormerSemanticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Panoptic segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic": mapper = MaskFormerPanopticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Instance segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance":
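The script's entry point is also not included in this sample. Training scripts built on DefaultTrainer conventionally end with a main() function and a launch() call using the helpers imported above (default_argument_parser, launch, DetectionCheckpointer, verify_results, comm). The sketch below follows that convention; the function bodies are assumptions, not a transcription of this repository's code.

def main(args):
    cfg = setup(args)  # setup() as sketched earlier (itself an assumption)
    if args.eval_only:
        model = Trainer.build_model(cfg)
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume
        )
        res = Trainer.test(cfg, model)
        if cfg.TEST.AUG.ENABLED:
            # only applies if the Trainer defines a test_with_TTA helper (not shown in this sample)
            res.update(Trainer.test_with_TTA(cfg, model))
        if comm.is_main_process():
            verify_results(cfg, res)
        return res
    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()


if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )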
mapper = MaskFormerInstanceDatasetMapper(cfg, True)
3
2023-10-13 02:32:25+00:00
16k
mlpc-ucsd/MasQCLIP
train_net.py
[ { "identifier": "add_maskformer2_config", "path": "masqclip/config.py", "snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER_NAME = \"mask_former_semantic\"\n # Color augmentation\n cfg.INPUT.COLOR_AUG_SSD = False\n # We retry random cropping until no single category in semantic segmentation GT occupies more\n # than `SINGLE_CATEGORY_MAX_AREA` part of the crop.\n cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0\n # Pad image and segmentation GT in dataset mapper.\n cfg.INPUT.SIZE_DIVISIBILITY = -1\n\n # solver config\n # weight decay on embedding\n cfg.SOLVER.WEIGHT_DECAY_EMBED = 0.0\n # optimizer\n cfg.SOLVER.OPTIMIZER = \"ADAMW\"\n cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1\n\n # mask_former model config\n cfg.MODEL.MASK_FORMER = CN()\n\n # loss\n cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION = True\n cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT = 0.1\n cfg.MODEL.MASK_FORMER.CLASS_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.DICE_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.MASK_WEIGHT = 20.0\n\n # transformer config\n cfg.MODEL.MASK_FORMER.NHEADS = 8\n cfg.MODEL.MASK_FORMER.DROPOUT = 0.1\n cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD = 2048\n cfg.MODEL.MASK_FORMER.ENC_LAYERS = 0\n cfg.MODEL.MASK_FORMER.DEC_LAYERS = 6\n cfg.MODEL.MASK_FORMER.PRE_NORM = False\n\n cfg.MODEL.MASK_FORMER.HIDDEN_DIM = 256\n cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES = 100\n\n cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE = \"res5\"\n cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ = False\n\n # mask_former inference config\n cfg.MODEL.MASK_FORMER.TEST = CN()\n cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON = True\n cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON = False\n cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON = False\n cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE = False\n\n # Sometimes `backbone.size_divisibility` is set to 0 for some backbone (e.g. 
ResNet)\n # you can use this config to override\n cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY = 32\n\n # pixel decoder config\n cfg.MODEL.SEM_SEG_HEAD.MASK_DIM = 256\n # adding transformer in pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS = 0\n # pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME = \"BasePixelDecoder\"\n\n # swin transformer backbone\n cfg.MODEL.SWIN = CN()\n cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE = 224\n cfg.MODEL.SWIN.PATCH_SIZE = 4\n cfg.MODEL.SWIN.EMBED_DIM = 96\n cfg.MODEL.SWIN.DEPTHS = [2, 2, 6, 2]\n cfg.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]\n cfg.MODEL.SWIN.WINDOW_SIZE = 7\n cfg.MODEL.SWIN.MLP_RATIO = 4.0\n cfg.MODEL.SWIN.QKV_BIAS = True\n cfg.MODEL.SWIN.QK_SCALE = None\n cfg.MODEL.SWIN.DROP_RATE = 0.0\n cfg.MODEL.SWIN.ATTN_DROP_RATE = 0.0\n cfg.MODEL.SWIN.DROP_PATH_RATE = 0.3\n cfg.MODEL.SWIN.APE = False\n cfg.MODEL.SWIN.PATCH_NORM = True\n cfg.MODEL.SWIN.OUT_FEATURES = [\"res2\", \"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SWIN.USE_CHECKPOINT = False\n\n # NOTE: maskformer2 extra configs\n # transformer module\n cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME = \"MultiScaleMaskedTransformerDecoder\"\n\n # LSJ aug\n cfg.INPUT.IMAGE_SIZE = 1024\n cfg.INPUT.MIN_SCALE = 0.1\n cfg.INPUT.MAX_SCALE = 2.0\n\n # MSDeformAttn encoder configs\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES = [\"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_POINTS = 4\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_HEADS = 8\n\n # point loss configs\n # Number of points sampled during training for a mask point head.\n cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS = 112 * 112\n # Oversampling parameter for PointRend point sampling during training. Parameter `k` in the\n # original paper.\n cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO = 3.0\n # Importance sampling parameter for PointRend point sampling during training. Parametr `beta` in\n # the original paper.\n cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO = 0.75" }, { "identifier": "add_masqclip_config", "path": "masqclip/config.py", "snippet": "def add_masqclip_config(cfg):\n \"\"\"\n Add config for MasQCLIP.\n \"\"\"\n cfg.MODEL.MASQ_CLIP = CN()\n cfg.MODEL.MASQ_CLIP.MODEL_NAME = [\"ViT-L/14@336px\"]\n \n cfg.MODEL.MASQ_CLIP.SCORE_THRESHOLD = 0.8\n cfg.MODEL.MASQ_CLIP.NMS_THRESHOLD = 0.1" }, { "identifier": "COCOInstanceNewBaselineDatasetMapper", "path": "masqclip/data/dataset_mappers/coco_instance_new_baseline_dataset_mapper.py", "snippet": "class COCOInstanceNewBaselineDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer.\n\n This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. 
Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n tfm_gens,\n image_format,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n tfm_gens: data augmentation\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n self.tfm_gens = tfm_gens\n logging.getLogger(__name__).info(\n \"[COCOInstanceNewBaselineDatasetMapper] Full TransformGens used in training: {}\".format(str(self.tfm_gens))\n )\n\n self.img_format = image_format\n self.is_train = is_train\n \n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n tfm_gens = build_transform_gen(cfg, is_train)\n\n ret = {\n \"is_train\": is_train,\n \"tfm_gens\": tfm_gens,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n # TODO: get padding mask\n # by feeding a \"segmentation mask\" to the same transforms\n padding_mask = np.ones(image.shape[:2])\n\n image, transforms = T.apply_transform_gens(self.tfm_gens, image)\n # the crop transformation has default padding value 0 for segmentation\n padding_mask = transforms.apply_segmentation(padding_mask)\n padding_mask = ~ padding_mask.astype(bool)\n\n image_shape = image.shape[:2] # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n dataset_dict[\"padding_mask\"] = torch.as_tensor(np.ascontiguousarray(padding_mask))\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n return dataset_dict\n\n if \"annotations\" in dataset_dict:\n # USER: Modify this if you want to keep them for some reason.\n for anno in dataset_dict[\"annotations\"]:\n # Let's always keep mask\n # if not self.mask_on:\n # anno.pop(\"segmentation\", None)\n anno.pop(\"keypoints\", None)\n\n # USER: Implement additional transformations if you have other types of data\n annos = [\n utils.transform_instance_annotations(obj, transforms, image_shape)\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n # NOTE: does not support BitMask due to augmentation\n # Current BitMask cannot handle empty objects\n instances = utils.annotations_to_instances(annos, image_shape)\n # After transforms such as cropping are applied, the bounding box may no longer\n # tightly bound the object. As an example, imagine a triangle object\n # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). 
The tight\n # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to\n # the intersection of original bounding box and the cropping box.\n instances.gt_boxes = instances.gt_masks.get_bounding_boxes()\n # Need to filter empty instances first (due to augmentation)\n instances = utils.filter_empty_instances(instances)\n # Generate masks from polygon\n h, w = instances.image_size\n # image_size_xyxy = torch.as_tensor([w, h, w, h], dtype=torch.float)\n if hasattr(instances, 'gt_masks'):\n gt_masks = instances.gt_masks\n gt_masks = convert_coco_poly_to_mask(gt_masks.polygons, h, w)\n instances.gt_masks = gt_masks\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "COCOPanopticNewBaselineDatasetMapper", "path": "masqclip/data/dataset_mappers/coco_panoptic_new_baseline_dataset_mapper.py", "snippet": "class COCOPanopticNewBaselineDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer.\n\n This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n tfm_gens,\n image_format,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n crop_gen: crop augmentation\n tfm_gens: data augmentation\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n self.tfm_gens = tfm_gens\n logging.getLogger(__name__).info(\n \"[COCOPanopticNewBaselineDatasetMapper] Full TransformGens used in training: {}\".format(\n str(self.tfm_gens)\n )\n )\n\n self.img_format = image_format\n self.is_train = is_train\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n tfm_gens = build_transform_gen(cfg, is_train)\n\n ret = {\n \"is_train\": is_train,\n \"tfm_gens\": tfm_gens,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n image, transforms = T.apply_transform_gens(self.tfm_gens, image)\n image_shape = image.shape[:2] # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n return dataset_dict\n\n if \"pan_seg_file_name\" in dataset_dict:\n pan_seg_gt = utils.read_image(dataset_dict.pop(\"pan_seg_file_name\"), \"RGB\")\n segments_info = dataset_dict[\"segments_info\"]\n\n # apply the same transformation to panoptic 
segmentation\n pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)\n\n from panopticapi.utils import rgb2id\n\n pan_seg_gt = rgb2id(pan_seg_gt)\n\n instances = Instances(image_shape)\n classes = []\n masks = []\n for segment_info in segments_info:\n class_id = segment_info[\"category_id\"]\n if not segment_info[\"iscrowd\"]:\n classes.append(class_id)\n masks.append(pan_seg_gt == segment_info[\"id\"])\n\n classes = np.array(classes)\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))\n instances.gt_boxes = Boxes(torch.zeros((0, 4)))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n instances.gt_boxes = masks.get_bounding_boxes()\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerInstanceDatasetMapper", "path": "masqclip/data/dataset_mappers/mask_former_instance_dataset_mapper.py", "snippet": "class MaskFormerInstanceDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for instance segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens = augmentations\n self.img_format = image_format\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n # change_code_note\n\n # # Build augmentation\n # augs = [\n # T.ResizeShortestEdge(\n # cfg.INPUT.MIN_SIZE_TRAIN,\n # cfg.INPUT.MAX_SIZE_TRAIN,\n # cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,\n # )\n # ]\n # if cfg.INPUT.CROP.ENABLED:\n # augs.append(\n # T.RandomCrop(\n # cfg.INPUT.CROP.TYPE,\n # cfg.INPUT.CROP.SIZE,\n # )\n # )\n # if cfg.INPUT.COLOR_AUG_SSD:\n # augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n # augs.append(T.RandomFlip())\n\n augs = [\n T.Resize((1024, 1024))\n ]\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerPanopticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], 
format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n aug_input = T.AugInput(image)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n\n # transform instnace masks\n assert \"annotations\" in dataset_dict\n for anno in dataset_dict[\"annotations\"]:\n anno.pop(\"keypoints\", None)\n\n annos = [\n utils.transform_instance_annotations(obj, transforms, image.shape[:2])\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n\n if len(annos):\n assert \"segmentation\" in annos[0]\n segms = [obj[\"segmentation\"] for obj in annos]\n masks = []\n for segm in segms:\n if isinstance(segm, list):\n # polygon\n masks.append(polygons_to_bitmask(segm, *image.shape[:2]))\n elif isinstance(segm, dict):\n # COCO RLE\n masks.append(mask_util.decode(segm))\n elif isinstance(segm, np.ndarray):\n assert segm.ndim == 2, \"Expect segmentation of 2 dimensions, got {}.\".format(\n segm.ndim\n )\n # mask array\n masks.append(segm)\n else:\n raise ValueError(\n \"Cannot convert segmentation of type '{}' to BitMasks!\"\n \"Supported types are: polygons as list[list[float] or ndarray],\"\n \" COCO-style RLE as a dict, or a binary segmentation mask \"\n \" in a 2D numpy array of shape HxW.\".format(type(segm))\n )\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n masks = [torch.from_numpy(np.ascontiguousarray(x)) for x in masks]\n\n classes = [int(obj[\"category_id\"]) for obj in annos]\n classes = torch.tensor(classes, dtype=torch.int64)\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n # pad image\n image = F.pad(image, padding_size, value=128).contiguous()\n # pad mask\n masks = [F.pad(x, padding_size, value=0).contiguous() for x in masks]\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n\n # Prepare per-category binary masks\n instances = Instances(image_shape)\n instances.gt_classes = classes\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, image.shape[-2], image.shape[-1]))\n else:\n masks = BitMasks(torch.stack(masks))\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerPanopticDatasetMapper", "path": "masqclip/data/dataset_mappers/mask_former_panoptic_dataset_mapper.py", "snippet": "class MaskFormerPanopticDatasetMapper(MaskFormerSemanticDatasetMapper):\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. 
Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n super().__init__(\n is_train,\n augmentations=augmentations,\n image_format=image_format,\n ignore_label=ignore_label,\n size_divisibility=size_divisibility,\n )\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerPanopticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n # semantic segmentation\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\"double\")\n else:\n sem_seg_gt = None\n\n # panoptic segmentation\n if \"pan_seg_file_name\" in dataset_dict:\n pan_seg_gt = utils.read_image(dataset_dict.pop(\"pan_seg_file_name\"), \"RGB\")\n segments_info = dataset_dict[\"segments_info\"]\n else:\n pan_seg_gt = None\n segments_info = None\n\n if pan_seg_gt is None:\n raise ValueError(\n \"Cannot find 'pan_seg_file_name' for panoptic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n if sem_seg_gt is not None:\n sem_seg_gt = aug_input.sem_seg\n\n # apply the same transformation to panoptic segmentation\n pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)\n\n from panopticapi.utils import rgb2id\n\n pan_seg_gt = rgb2id(pan_seg_gt)\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n pan_seg_gt = torch.as_tensor(pan_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous()\n pan_seg_gt = F.pad(\n pan_seg_gt, padding_size, value=0\n ).contiguous() # 0 is the VOID panoptic label\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n 
raise ValueError(\"Pemantic segmentation dataset should not have 'annotations'.\")\n\n # Prepare per-category binary masks\n pan_seg_gt = pan_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = []\n masks = []\n for segment_info in segments_info:\n class_id = segment_info[\"category_id\"]\n if not segment_info[\"iscrowd\"]:\n classes.append(class_id)\n masks.append(pan_seg_gt == segment_info[\"id\"])\n\n classes = np.array(classes)\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerSemanticDatasetMapper", "path": "masqclip/data/dataset_mappers/mask_former_semantic_dataset_mapper.py", "snippet": "class MaskFormerSemanticDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for semantic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens = augmentations\n self.img_format = image_format\n self.ignore_label = ignore_label\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n\n augs = [\n T.Resize((1024, 1024))\n ]\n\n # Assume always applies to the training set.\n dataset_names = cfg.DATASETS.TRAIN\n meta = MetadataCatalog.get(dataset_names[0])\n ignore_label = meta.ignore_label\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"ignore_label\": ignore_label,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerSemanticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = 
utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\"double\")\n else:\n sem_seg_gt = None\n\n if sem_seg_gt is None:\n raise ValueError(\n \"Cannot find 'sem_seg_file_name' for semantic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n sem_seg_gt = aug_input.sem_seg\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous()\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n raise ValueError(\"Semantic segmentation dataset should not have 'annotations'.\")\n\n # Prepare per-category binary masks\n if sem_seg_gt is not None:\n sem_seg_gt = sem_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = np.unique(sem_seg_gt)\n # remove ignored region\n classes = classes[classes != self.ignore_label]\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n\n masks = []\n for class_id in classes:\n masks.append(sem_seg_gt == class_id)\n\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, sem_seg_gt.shape[-2], sem_seg_gt.shape[-1]))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "SemanticSegmentorWithTTA", "path": "masqclip/test_time_augmentation.py", "snippet": "class SemanticSegmentorWithTTA(nn.Module):\n \"\"\"\n A SemanticSegmentor with test-time augmentation enabled.\n Its :meth:`__call__` method has the same interface as :meth:`SemanticSegmentor.forward`.\n \"\"\"\n\n def __init__(self, cfg, model, tta_mapper=None, batch_size=1):\n \"\"\"\n Args:\n cfg (CfgNode):\n model (SemanticSegmentor): a SemanticSegmentor to apply TTA on.\n tta_mapper (callable): takes a dataset dict and returns a list of\n augmented versions of the dataset dict. 
Defaults to\n `DatasetMapperTTA(cfg)`.\n batch_size (int): batch the augmented images into this batch size for inference.\n \"\"\"\n super().__init__()\n if isinstance(model, DistributedDataParallel):\n model = model.module\n self.cfg = cfg.clone()\n\n self.model = model\n\n if tta_mapper is None:\n tta_mapper = DatasetMapperTTA(cfg)\n self.tta_mapper = tta_mapper\n self.batch_size = batch_size\n\n def __call__(self, batched_inputs):\n \"\"\"\n Same input/output format as :meth:`SemanticSegmentor.forward`\n \"\"\"\n\n def _maybe_read_image(dataset_dict):\n ret = copy.copy(dataset_dict)\n if \"image\" not in ret:\n image = read_image(ret.pop(\"file_name\"), self.model.input_format)\n image = torch.from_numpy(np.ascontiguousarray(image.transpose(2, 0, 1))) # CHW\n ret[\"image\"] = image\n if \"height\" not in ret and \"width\" not in ret:\n ret[\"height\"] = image.shape[1]\n ret[\"width\"] = image.shape[2]\n return ret\n\n processed_results = []\n for x in batched_inputs:\n result = self._inference_one_image(_maybe_read_image(x))\n processed_results.append(result)\n return processed_results\n\n def _inference_one_image(self, input):\n \"\"\"\n Args:\n input (dict): one dataset dict with \"image\" field being a CHW tensor\n Returns:\n dict: one output dict\n \"\"\"\n orig_shape = (input[\"height\"], input[\"width\"])\n augmented_inputs, tfms = self._get_augmented_inputs(input)\n\n final_predictions = None\n count_predictions = 0\n for input, tfm in zip(augmented_inputs, tfms):\n count_predictions += 1\n with torch.no_grad():\n if final_predictions is None:\n if any(isinstance(t, HFlipTransform) for t in tfm.transforms):\n final_predictions = self.model([input])[0].pop(\"sem_seg\").flip(dims=[2])\n else:\n final_predictions = self.model([input])[0].pop(\"sem_seg\")\n else:\n if any(isinstance(t, HFlipTransform) for t in tfm.transforms):\n final_predictions += self.model([input])[0].pop(\"sem_seg\").flip(dims=[2])\n else:\n final_predictions += self.model([input])[0].pop(\"sem_seg\")\n\n final_predictions = final_predictions / count_predictions\n return {\"sem_seg\": final_predictions}\n\n def _get_augmented_inputs(self, input):\n augmented_inputs = self.tta_mapper(input)\n tfms = [x.pop(\"transforms\") for x in augmented_inputs]\n return augmented_inputs, tfms" }, { "identifier": "InstanceSegEvaluator", "path": "masqclip/evaluation/instance_evaluation.py", "snippet": "class InstanceSegEvaluator(COCOEvaluator):\n \"\"\"\n Evaluate AR for object proposals, AP for instance detection/segmentation, AP\n for keypoint detection outputs using COCO's metrics.\n See http://cocodataset.org/#detection-eval and\n http://cocodataset.org/#keypoints-eval to understand its metrics.\n The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means\n the metric cannot be computed (e.g. due to no predictions made).\n\n In addition to COCO, this evaluator is able to support any bounding box detection,\n instance segmentation, or keypoint detection dataset.\n \"\"\"\n\n def _eval_predictions(self, predictions, img_ids=None):\n \"\"\"\n Evaluate predictions. 
Fill self._results with the metrics of the tasks.\n \"\"\"\n self._logger.info(\"Preparing results for COCO format ...\")\n coco_results = list(itertools.chain(*[x[\"instances\"] for x in predictions]))\n tasks = self._tasks or self._tasks_from_predictions(coco_results)\n\n # unmap the category ids for COCO\n if hasattr(self._metadata, \"thing_dataset_id_to_contiguous_id\"):\n dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id\n # all_contiguous_ids = list(dataset_id_to_contiguous_id.values())\n # num_classes = len(all_contiguous_ids)\n # assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1\n\n reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()}\n for result in coco_results:\n category_id = result[\"category_id\"]\n # assert category_id < num_classes, (\n # f\"A prediction has class={category_id}, \"\n # f\"but the dataset only has {num_classes} classes and \"\n # f\"predicted class id should be in [0, {num_classes - 1}].\"\n # )\n assert category_id in reverse_id_mapping, (\n f\"A prediction has class={category_id}, \"\n f\"but the dataset only has class ids in {dataset_id_to_contiguous_id}.\"\n )\n result[\"category_id\"] = reverse_id_mapping[category_id]\n\n if self._output_dir:\n file_path = os.path.join(self._output_dir, \"coco_instances_results.json\")\n self._logger.info(\"Saving results to {}\".format(file_path))\n with PathManager.open(file_path, \"w\") as f:\n f.write(json.dumps(coco_results))\n f.flush()\n\n if not self._do_evaluation:\n self._logger.info(\"Annotations are not available for evaluation.\")\n return\n\n self._logger.info(\n \"Evaluating predictions with {} COCO API...\".format(\n \"unofficial\" if self._use_fast_impl else \"official\"\n )\n )\n for task in sorted(tasks):\n assert task in {\"bbox\", \"segm\", \"keypoints\"}, f\"Got unknown task: {task}!\"\n coco_eval = (\n _evaluate_predictions_on_coco(\n self._coco_api,\n coco_results,\n task,\n kpt_oks_sigmas=self._kpt_oks_sigmas,\n use_fast_impl=self._use_fast_impl,\n img_ids=img_ids,\n max_dets_per_image=self._max_dets_per_image,\n )\n if len(coco_results) > 0\n else None # cocoapi does not handle empty results very well\n )\n\n res = self._derive_coco_results(\n coco_eval, task, class_names=self._metadata.get(\"thing_classes\")\n )\n self._results[task] = res" } ]
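The SemanticSegmentorWithTTA snippet in the context above averages semantic-segmentation scores over augmented views, flipping horizontally-flipped predictions back before accumulating. The following is a minimal sketch of just that averaging step, assuming a placeholder predict callable and (image, was_hflipped) pairs rather than the real detectron2 model and DatasetMapperTTA outputs.

import torch

def tta_average_sem_seg(predict, views):
    # predict(image) -> (C, H, W) score tensor; views is a list of
    # (image, was_hflipped) pairs. Both are assumed placeholders, not
    # the actual detectron2 interfaces used in the snippet above.
    total, count = None, 0
    for image, was_hflipped in views:
        scores = predict(image)
        if was_hflipped:
            scores = scores.flip(dims=[2])  # undo the horizontal flip along the width axis
        total = scores if total is None else total + scores
        count += 1
    return total / count

# Toy usage with an assumed 3-class predictor on 4x4 inputs.
dummy_predict = lambda img: torch.softmax(torch.randn(3, 4, 4), dim=0)
views = [(torch.zeros(3, 4, 4), False), (torch.zeros(3, 4, 4), True)]
print(tta_average_sem_seg(dummy_predict, views).shape)  # torch.Size([3, 4, 4])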
import copy import itertools import logging import os import torch import detectron2.utils.comm as comm import warnings from collections import OrderedDict from typing import Any, Dict, List, Set from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.data import MetadataCatalog, build_detection_train_loader from detectron2.engine import ( DefaultTrainer, default_argument_parser, default_setup, launch, ) from detectron2.evaluation import ( CityscapesInstanceEvaluator, CityscapesSemSegEvaluator, COCOEvaluator, COCOPanopticEvaluator, DatasetEvaluators, LVISEvaluator, SemSegEvaluator, verify_results, ) from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler from detectron2.solver.build import maybe_add_gradient_clipping from detectron2.utils.logger import setup_logger from masqclip import ( COCOInstanceNewBaselineDatasetMapper, COCOPanopticNewBaselineDatasetMapper, InstanceSegEvaluator, MaskFormerInstanceDatasetMapper, MaskFormerPanopticDatasetMapper, MaskFormerSemanticDatasetMapper, SemanticSegmentorWithTTA, add_maskformer2_config, add_masqclip_config, )
12,391
mapper = COCOPanopticNewBaselineDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) else: mapper = None return build_detection_train_loader(cfg, mapper=mapper) @classmethod def build_lr_scheduler(cls, cfg, optimizer): """ It now calls :func:`detectron2.solver.build_lr_scheduler`. Overwrite it if you'd like a different scheduler. """ return build_lr_scheduler(cfg, optimizer) @classmethod def build_optimizer(cls, cfg, model): weight_decay_norm = cfg.SOLVER.WEIGHT_DECAY_NORM weight_decay_embed = cfg.SOLVER.WEIGHT_DECAY_EMBED defaults = {} defaults["lr"] = cfg.SOLVER.BASE_LR defaults["weight_decay"] = cfg.SOLVER.WEIGHT_DECAY norm_module_types = ( torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d, torch.nn.SyncBatchNorm, # NaiveSyncBatchNorm inherits from BatchNorm2d torch.nn.GroupNorm, torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d, torch.nn.LayerNorm, torch.nn.LocalResponseNorm, ) params: List[Dict[str, Any]] = [] memo: Set[torch.nn.parameter.Parameter] = set() for module_name, module in model.named_modules(): for module_param_name, value in module.named_parameters(recurse=False): if not value.requires_grad: continue # Avoid duplicating parameters if value in memo: continue memo.add(value) hyperparams = copy.copy(defaults) if "backbone" in module_name: hyperparams["lr"] = hyperparams["lr"] * cfg.SOLVER.BACKBONE_MULTIPLIER if ( "relative_position_bias_table" in module_param_name or "absolute_pos_embed" in module_param_name ): print(module_param_name) hyperparams["weight_decay"] = 0.0 if isinstance(module, norm_module_types): hyperparams["weight_decay"] = weight_decay_norm if isinstance(module, torch.nn.Embedding): hyperparams["weight_decay"] = weight_decay_embed params.append({"params": [value], **hyperparams}) def maybe_add_full_model_gradient_clipping(optim): # detectron2 doesn't have full model gradient clipping now clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE enable = ( cfg.SOLVER.CLIP_GRADIENTS.ENABLED and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model" and clip_norm_val > 0.0 ) class FullModelGradientClippingOptimizer(optim): def step(self, closure=None): all_params = itertools.chain(*[x["params"] for x in self.param_groups]) torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val) super().step(closure=closure) return FullModelGradientClippingOptimizer if enable else optim optimizer_type = cfg.SOLVER.OPTIMIZER if optimizer_type == "SGD": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)( params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM ) elif optimizer_type == "ADAMW": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)( params, cfg.SOLVER.BASE_LR ) else: raise NotImplementedError(f"no optimizer type {optimizer_type}") if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model": optimizer = maybe_add_gradient_clipping(cfg, optimizer) return optimizer @classmethod def test_with_TTA(cls, cfg, model): logger = logging.getLogger("detectron2.trainer") # In the end of training, run an evaluation with TTA. logger.info("Running inference with test-time augmentation ...") model = SemanticSegmentorWithTTA(cfg, model) evaluators = [ cls.build_evaluator( cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA") ) for name in cfg.DATASETS.TEST ] res = cls.test(cfg, model, evaluators) res = OrderedDict({k + "_TTA": v for k, v in res.items()}) return res def setup(args): """ Create configs and perform basic setups. 
""" cfg = get_cfg() # for poly lr schedule add_deeplab_config(cfg) add_maskformer2_config(cfg)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ MasQCLIP Training Script. """ # MasQCLIP warnings.filterwarnings("ignore") class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to MaskFormer. """ @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]: evaluator_list.append( SemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, ) ) # instance segmentation if evaluator_type == "coco": evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) # panoptic segmentation if evaluator_type in [ "coco_panoptic_seg", "ade20k_panoptic_seg", "cityscapes_panoptic_seg", "mapillary_vistas_panoptic_seg", ]: if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON: evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) # COCO if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Mapillary Vistas if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Cityscapes if evaluator_type == "cityscapes_instance": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesInstanceEvaluator(dataset_name) if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesSemSegEvaluator(dataset_name) if evaluator_type == "cityscapes_panoptic_seg": if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." evaluator_list.append(CityscapesSemSegEvaluator(dataset_name)) if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." 
evaluator_list.append(CityscapesInstanceEvaluator(dataset_name)) # ADE20K if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) # LVIS if evaluator_type == "lvis": return LVISEvaluator(dataset_name, output_dir=output_folder) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic": mapper = MaskFormerSemanticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Panoptic segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic": mapper = MaskFormerPanopticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Instance segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance": mapper = MaskFormerInstanceDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # coco instance segmentation lsj new baseline elif cfg.INPUT.DATASET_MAPPER_NAME == "coco_instance_lsj": mapper = COCOInstanceNewBaselineDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # coco panoptic segmentation lsj new baseline elif cfg.INPUT.DATASET_MAPPER_NAME == "coco_panoptic_lsj": mapper = COCOPanopticNewBaselineDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) else: mapper = None return build_detection_train_loader(cfg, mapper=mapper) @classmethod def build_lr_scheduler(cls, cfg, optimizer): """ It now calls :func:`detectron2.solver.build_lr_scheduler`. Overwrite it if you'd like a different scheduler. 
""" return build_lr_scheduler(cfg, optimizer) @classmethod def build_optimizer(cls, cfg, model): weight_decay_norm = cfg.SOLVER.WEIGHT_DECAY_NORM weight_decay_embed = cfg.SOLVER.WEIGHT_DECAY_EMBED defaults = {} defaults["lr"] = cfg.SOLVER.BASE_LR defaults["weight_decay"] = cfg.SOLVER.WEIGHT_DECAY norm_module_types = ( torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d, torch.nn.SyncBatchNorm, # NaiveSyncBatchNorm inherits from BatchNorm2d torch.nn.GroupNorm, torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d, torch.nn.LayerNorm, torch.nn.LocalResponseNorm, ) params: List[Dict[str, Any]] = [] memo: Set[torch.nn.parameter.Parameter] = set() for module_name, module in model.named_modules(): for module_param_name, value in module.named_parameters(recurse=False): if not value.requires_grad: continue # Avoid duplicating parameters if value in memo: continue memo.add(value) hyperparams = copy.copy(defaults) if "backbone" in module_name: hyperparams["lr"] = hyperparams["lr"] * cfg.SOLVER.BACKBONE_MULTIPLIER if ( "relative_position_bias_table" in module_param_name or "absolute_pos_embed" in module_param_name ): print(module_param_name) hyperparams["weight_decay"] = 0.0 if isinstance(module, norm_module_types): hyperparams["weight_decay"] = weight_decay_norm if isinstance(module, torch.nn.Embedding): hyperparams["weight_decay"] = weight_decay_embed params.append({"params": [value], **hyperparams}) def maybe_add_full_model_gradient_clipping(optim): # detectron2 doesn't have full model gradient clipping now clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE enable = ( cfg.SOLVER.CLIP_GRADIENTS.ENABLED and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model" and clip_norm_val > 0.0 ) class FullModelGradientClippingOptimizer(optim): def step(self, closure=None): all_params = itertools.chain(*[x["params"] for x in self.param_groups]) torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val) super().step(closure=closure) return FullModelGradientClippingOptimizer if enable else optim optimizer_type = cfg.SOLVER.OPTIMIZER if optimizer_type == "SGD": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)( params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM ) elif optimizer_type == "ADAMW": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)( params, cfg.SOLVER.BASE_LR ) else: raise NotImplementedError(f"no optimizer type {optimizer_type}") if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model": optimizer = maybe_add_gradient_clipping(cfg, optimizer) return optimizer @classmethod def test_with_TTA(cls, cfg, model): logger = logging.getLogger("detectron2.trainer") # In the end of training, run an evaluation with TTA. logger.info("Running inference with test-time augmentation ...") model = SemanticSegmentorWithTTA(cfg, model) evaluators = [ cls.build_evaluator( cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA") ) for name in cfg.DATASETS.TEST ] res = cls.test(cfg, model, evaluators) res = OrderedDict({k + "_TTA": v for k, v in res.items()}) return res def setup(args): """ Create configs and perform basic setups. """ cfg = get_cfg() # for poly lr schedule add_deeplab_config(cfg) add_maskformer2_config(cfg)
add_masqclip_config(cfg)
1
2023-10-13 02:43:53+00:00
16k
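The build_optimizer method in this record wraps the chosen optimizer class so that, when CLIP_TYPE is "full_model", the gradient norm is clipped over all parameter groups jointly before each step. A self-contained sketch of that wrapper pattern follows; the toy linear model, learning rate, and clip value are illustrative assumptions, not values from any MasQCLIP config.

import itertools
import torch

def with_full_model_grad_clipping(optim_cls, clip_norm_val):
    # Subclass the optimizer and clip the global grad norm before stepping,
    # mirroring maybe_add_full_model_gradient_clipping in the script above.
    class FullModelGradientClippingOptimizer(optim_cls):
        def step(self, closure=None):
            all_params = itertools.chain(*[g["params"] for g in self.param_groups])
            torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
            super().step(closure=closure)
    return FullModelGradientClippingOptimizer

# Illustrative usage with assumed hyperparameters.
model = torch.nn.Linear(8, 2)
OptimCls = with_full_model_grad_clipping(torch.optim.SGD, clip_norm_val=1.0)
optimizer = OptimCls(model.parameters(), lr=0.1, momentum=0.9)
loss = model(torch.randn(4, 8)).sum()
loss.backward()
optimizer.step()  # global grad norm is clipped to 1.0 before the SGD update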
ielab/llm-rankers
run.py
[ { "identifier": "SearchResult", "path": "rankers/rankers.py", "snippet": "class SearchResult:\n docid: str\n score: float\n text: str" }, { "identifier": "PointwiseLlmRanker", "path": "rankers/pointwise.py", "snippet": "class PointwiseLlmRanker(LlmRanker):\n\n def __init__(self, model_name_or_path, tokenizer_name_or_path, device, method=\"qlm\", batch_size=1, cache_dir=None):\n self.tokenizer = T5Tokenizer.from_pretrained(tokenizer_name_or_path\n if tokenizer_name_or_path is not None else\n model_name_or_path,\n cache_dir=cache_dir)\n self.llm = T5ForConditionalGeneration.from_pretrained(model_name_or_path,\n device_map='auto',\n torch_dtype=torch.float16 if device == 'cuda'\n else torch.float32,\n cache_dir=cache_dir)\n\n self.device = device\n self.method = method\n self.batch_size = batch_size\n\n self.total_compare = 0\n self.total_completion_tokens = 0\n self.total_prompt_tokens = 0\n\n def rerank(self, query: str, ranking: List[SearchResult]) -> List[SearchResult]:\n self.total_compare = 0\n self.total_completion_tokens = 0\n self.total_prompt_tokens = 0\n\n if self.method == \"qlm\":\n prompt = \"Passage: {text}\\nPlease write a question based on this passage.\"\n data = [prompt.format(text=doc.text) for doc in ranking]\n dataset = Text2TextGenerationDataset(data, self.tokenizer)\n loader = DataLoader(\n dataset,\n batch_size=self.batch_size,\n collate_fn=DataCollatorWithPadding(\n self.tokenizer,\n max_length=512,\n padding='longest',\n ),\n shuffle=False,\n drop_last=False,\n num_workers=4\n )\n\n labels = self.tokenizer.encode(f\"<pad> {query}\",\n return_tensors=\"pt\",\n add_special_tokens=False).to(self.llm.device).repeat(self.batch_size, 1)\n current_id = 0\n with torch.no_grad():\n for batch_inputs in tqdm(loader):\n self.total_compare += 1\n self.total_prompt_tokens += batch_inputs['input_ids'].shape[0] * batch_inputs['input_ids'].shape[1]\n\n batch_labels = labels if labels.shape[0] == len(batch_inputs['input_ids']) \\\n else labels[:len(batch_inputs['input_ids']), :] # last batch might be smaller\n self.total_prompt_tokens += batch_labels.shape[0] * batch_labels.shape[\n 1] # we count decoder inputs as part of prompt.\n\n batch_inputs = batch_inputs.to(self.llm.device)\n logits = self.llm(input_ids=batch_inputs['input_ids'],\n attention_mask=batch_inputs['attention_mask'],\n labels=batch_labels).logits\n\n loss_fct = torch.nn.CrossEntropyLoss(reduction=\"none\")\n scores = loss_fct(logits.view(-1, logits.size(-1)), batch_labels.view(-1))\n scores = -1 * scores.view(-1, batch_labels.size(-1)).sum(dim=1) # neg log prob\n for score in scores:\n ranking[current_id].score = score.item()\n current_id += 1\n\n elif self.method == \"yes_no\":\n prompt = \"Passage: {text}\\nQuery: {query}\\nDoes the passage answer the query? 
Answer 'Yes' or 'No'\"\n yes_id = self.tokenizer.encode(\"Yes\", add_special_tokens=False)[0]\n no_id = self.tokenizer.encode(\"No\", add_special_tokens=False)[0]\n data = [prompt.format(text=doc.text, query=query) for doc in ranking]\n dataset = Text2TextGenerationDataset(data, self.tokenizer)\n loader = DataLoader(\n dataset,\n batch_size=self.batch_size,\n collate_fn=DataCollatorWithPadding(\n self.tokenizer,\n max_length=512,\n padding='longest',\n ),\n shuffle=False,\n drop_last=False,\n num_workers=4\n )\n decoder_input_ids = torch.Tensor([self.tokenizer.pad_token_id]).to(self.llm.device, dtype=torch.long).repeat(self.batch_size, 1)\n current_id = 0\n with torch.no_grad():\n for batch_inputs in tqdm(loader):\n self.total_compare += 1\n self.total_prompt_tokens += batch_inputs['input_ids'].shape[0] * batch_inputs['input_ids'].shape[1]\n\n batch_inputs = batch_inputs.to(self.llm.device)\n\n batch_decoder_input_ids = decoder_input_ids if decoder_input_ids.shape[0] == len(batch_inputs['input_ids']) \\\n else decoder_input_ids[:len(batch_inputs['input_ids']), :] # last batch might be smaller\n\n self.total_prompt_tokens += batch_decoder_input_ids.shape[0] * batch_decoder_input_ids.shape[\n 1]\n\n logits = self.llm(input_ids=batch_inputs['input_ids'],\n attention_mask=batch_inputs['attention_mask'],\n decoder_input_ids=batch_decoder_input_ids).logits\n yes_scores = logits[:, :, yes_id]\n no_scores = logits[:, :, no_id]\n batch_scores = torch.cat((yes_scores, no_scores), dim=1)\n batch_scores = torch.nn.functional.softmax(batch_scores, dim=1)\n scores = batch_scores[:, 0]\n for score in scores:\n ranking[current_id].score = score.item()\n current_id += 1\n\n ranking = sorted(ranking, key=lambda x: x.score, reverse=True)\n return ranking\n\n def truncate(self, text, length):\n return self.tokenizer.convert_tokens_to_string(self.tokenizer.tokenize(text)[:length])" }, { "identifier": "MonoT5LlmRanker", "path": "rankers/pointwise.py", "snippet": "class MonoT5LlmRanker(PointwiseLlmRanker):\n def rerank(self, query: str, ranking: List[SearchResult]) -> List[SearchResult]:\n self.total_compare = 0\n self.total_completion_tokens = 0\n self.total_prompt_tokens = 0\n prompt = \"Query: {query} Document: {document} Relevant:\"\n data = [prompt.format(query=query, document=doc.text) for doc in ranking]\n dataset = Text2TextGenerationDataset(data, self.tokenizer)\n loader = DataLoader(\n dataset,\n batch_size=self.batch_size,\n collate_fn=DataCollatorWithPadding(\n self.tokenizer,\n max_length=512,\n padding='longest',\n ),\n shuffle=False,\n drop_last=False,\n num_workers=4\n )\n decoder_input_ids = torch.Tensor([self.llm.config.decoder_start_token_id]).to(self.llm.device, dtype=torch.long).repeat(\n self.batch_size, 1)\n current_id = 0\n with torch.no_grad():\n for batch_inputs in tqdm(loader):\n self.total_compare += 1\n self.total_prompt_tokens += batch_inputs['input_ids'].shape[0] * batch_inputs['input_ids'].shape[1]\n\n batch_inputs = batch_inputs.to(self.llm.device)\n\n batch_decoder_input_ids = decoder_input_ids if decoder_input_ids.shape[0] == len(\n batch_inputs['input_ids']) \\\n else decoder_input_ids[:len(batch_inputs['input_ids']), :] # last batch might be smaller\n\n self.total_prompt_tokens += batch_decoder_input_ids.shape[0] * batch_decoder_input_ids.shape[\n 1]\n\n logits = self.llm(input_ids=batch_inputs['input_ids'],\n attention_mask=batch_inputs['attention_mask'],\n decoder_input_ids=batch_decoder_input_ids).logits\n\n # 6136 and 1176 are the indexes of the tokens false and true in 
T5.\n batch_scores = logits[:, 0, [6136, 1176]]\n batch_scores = torch.nn.functional.softmax(batch_scores, dim=1)\n scores = batch_scores[:, 1]\n for score in scores:\n ranking[current_id].score = score.item()\n current_id += 1\n\n ranking = sorted(ranking, key=lambda x: x.score, reverse=True)\n return ranking" }, { "identifier": "SetwiseLlmRanker", "path": "rankers/setwise.py", "snippet": "class SetwiseLlmRanker(LlmRanker):\n CHARACTERS = [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\",\n \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\", \"U\", \"V\", \"W\"] # \"Passage X\" and \"Passage Y\" will be tokenized into 3 tokens, so we dont use for now\n\n def __init__(self,\n model_name_or_path,\n tokenizer_name_or_path,\n device,\n num_child=3,\n k=10,\n scoring='generation',\n method=\"heapsort\",\n num_permutation=1,\n cache_dir=None):\n\n self.device = device\n self.num_child = num_child\n self.num_permutation = num_permutation\n self.k = k\n self.config = AutoConfig.from_pretrained(model_name_or_path, cache_dir=cache_dir)\n if self.config.model_type == 't5':\n self.tokenizer = T5Tokenizer.from_pretrained(tokenizer_name_or_path\n if tokenizer_name_or_path is not None else\n model_name_or_path,\n cache_dir=cache_dir)\n self.llm = T5ForConditionalGeneration.from_pretrained(model_name_or_path,\n device_map='auto',\n torch_dtype=torch.float16 if device == 'cuda'\n else torch.float32,\n cache_dir=cache_dir)\n self.decoder_input_ids = self.tokenizer.encode(\"<pad> Passage\",\n return_tensors=\"pt\",\n add_special_tokens=False).to(self.device) if self.tokenizer else None\n\n test = []\n for i in range(len(self.CHARACTERS)):\n test.append(f'<pad> Passage {self.CHARACTERS[i]}')\n\n self.target_token_ids = self.tokenizer.batch_encode_plus([f'<pad> Passage {self.CHARACTERS[i]}'\n for i in range(len(self.CHARACTERS))],\n return_tensors=\"pt\",\n add_special_tokens=False,\n padding=True).input_ids[:, -1]\n elif self.config.model_type == 'llama':\n self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, cache_dir=cache_dir)\n self.tokenizer.use_default_system_prompt = False\n if 'vicuna' and 'v1.5' in model_name_or_path:\n self.tokenizer.chat_template = \"{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = 'A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user\\\\'s questions.' 
%}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 %}{{ system_message }}{% endif %}{% if message['role'] == 'user' %}{{ ' USER: ' + message['content'].strip() }}{% elif message['role'] == 'assistant' %}{{ ' ASSISTANT: ' + message['content'].strip() + eos_token }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ ' ASSISTANT:' }}{% endif %}\"\n self.llm = AutoModelForCausalLM.from_pretrained(model_name_or_path,\n device_map='auto',\n torch_dtype=torch.float16 if device == 'cuda'\n else torch.float32,\n cache_dir=cache_dir).eval()\n\n self.scoring = scoring\n self.method = method\n self.total_compare = 0\n self.total_completion_tokens = 0\n self.total_prompt_tokens = 0\n\n def compare(self, query: str, docs: List):\n self.total_compare += 1 if self.num_permutation == 1 else self.num_permutation\n\n passages = \"\\n\\n\".join([f'Passage {self.CHARACTERS[i]}: \"{doc.text}\"' for i, doc in enumerate(docs)])\n input_text = f'Given a query \"{query}\", which of the following passages is the most relevant one to the query?\\n\\n' \\\n + passages + '\\n\\nOutput only the passage label of the most relevant passage:'\n\n if self.scoring == 'generation':\n if self.config.model_type == 't5':\n\n if self.num_permutation == 1:\n input_ids = self.tokenizer(input_text, return_tensors=\"pt\").input_ids.to(self.device)\n self.total_prompt_tokens += input_ids.shape[1]\n\n output_ids = self.llm.generate(input_ids,\n decoder_input_ids=self.decoder_input_ids,\n max_new_tokens=2)[0]\n\n self.total_completion_tokens += output_ids.shape[0]\n\n output = self.tokenizer.decode(output_ids,\n skip_special_tokens=True).strip()\n output = output[-1]\n else:\n id_passage = [(i, p) for i, p in enumerate(docs)]\n labels = [self.CHARACTERS[i] for i in range(len(docs))]\n batch_data = []\n for _ in range(self.num_permutation):\n batch_data.append([random.sample(id_passage, len(id_passage)),\n random.sample(labels, len(labels))])\n\n batch_ref = []\n input_text = []\n for batch in batch_data:\n ref = []\n passages = []\n characters = []\n for p, c in zip(batch[0], batch[1]):\n ref.append(p[0])\n passages.append(p[1].text)\n characters.append(c)\n batch_ref.append((ref, characters))\n passages = \"\\n\\n\".join([f'Passage {characters[i]}: \"{passages[i]}\"' for i in range(len(passages))])\n input_text.append(f'Given a query \"{query}\", which of the following passages is the most relevant one to the query?\\n\\n' \\\n + passages + '\\n\\nOutput only the passage label of the most relevant passage:')\n\n input_ids = self.tokenizer(input_text, return_tensors=\"pt\").input_ids.to(self.device)\n self.total_prompt_tokens += input_ids.shape[1] * input_ids.shape[0]\n\n output_ids = self.llm.generate(input_ids,\n decoder_input_ids=self.decoder_input_ids.repeat(input_ids.shape[0], 1),\n max_new_tokens=2)\n output = self.tokenizer.batch_decode(output_ids[:, self.decoder_input_ids.shape[1]:],\n skip_special_tokens=True)\n\n # vote\n candidates = []\n for ref, result in zip(batch_ref, output):\n result = result.strip().upper()\n docids, characters = ref\n if len(result) != 1 or result not in characters:\n print(f\"Unexpected output: {result}\")\n continue\n win_doc = docids[characters.index(result)]\n candidates.append(win_doc)\n\n if len(candidates) == 0:\n print(f\"Unexpected voting: {output}\")\n output = \"Unexpected voting.\"\n else:\n # handle tie\n 
candidate_counts = Counter(candidates)\n max_count = max(candidate_counts.values())\n most_common_candidates = [candidate for candidate, count in candidate_counts.items() if\n count == max_count]\n if len(most_common_candidates) == 1:\n output = self.CHARACTERS[most_common_candidates[0]]\n else:\n output = self.CHARACTERS[random.choice(most_common_candidates)]\n\n elif self.config.model_type == 'llama':\n conversation = [{\"role\": \"user\", \"content\": input_text}]\n\n prompt = self.tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)\n prompt += \" Passage:\"\n\n input_ids = self.tokenizer(prompt, return_tensors=\"pt\").input_ids.to(self.device)\n self.total_prompt_tokens += input_ids.shape[1]\n\n output_ids = self.llm.generate(input_ids,\n do_sample=False,\n temperature=0.0,\n top_p=None,\n max_new_tokens=1)[0]\n\n self.total_completion_tokens += output_ids.shape[0]\n\n output = self.tokenizer.decode(output_ids[input_ids.shape[1]:],\n skip_special_tokens=True).strip().upper()\n else:\n raise NotImplementedError\n\n elif self.scoring == 'likelihood':\n if self.config.model_type == 't5':\n input_ids = self.tokenizer(input_text, return_tensors=\"pt\").input_ids.to(self.device)\n self.total_prompt_tokens += input_ids.shape[1]\n with torch.no_grad():\n logits = self.llm(input_ids=input_ids, decoder_input_ids=self.decoder_input_ids).logits[0][-1]\n distributions = torch.softmax(logits, dim=0)\n scores = distributions[self.target_token_ids[:len(docs)]]\n ranked = sorted(zip(self.CHARACTERS[:len(docs)], scores), key=lambda x: x[1], reverse=True)\n output = ranked[0][0]\n\n else:\n raise NotImplementedError\n\n if len(output) == 1 and output in self.CHARACTERS:\n pass\n else:\n print(f\"Unexpected output: {output}\")\n\n return output\n\n def heapify(self, arr, n, i, query):\n # Find largest among root and children\n if self.num_child * i + 1 < n: # if there are children\n docs = [arr[i]] + arr[self.num_child * i + 1: min((self.num_child * (i + 1) + 1), n)]\n inds = [i] + list(range(self.num_child * i + 1, min((self.num_child * (i + 1) + 1), n)))\n output = self.compare(query, docs)\n try:\n best_ind = self.CHARACTERS.index(output)\n except ValueError:\n best_ind = 0\n try:\n largest = inds[best_ind]\n except IndexError:\n largest = i\n # If root is not largest, swap with largest and continue heapifying\n if largest != i:\n arr[i], arr[largest] = arr[largest], arr[i]\n self.heapify(arr, n, largest, query)\n\n def heapSort(self, arr, query, k):\n n = len(arr)\n ranked = 0\n # Build max heap\n for i in range(n // self.num_child, -1, -1):\n self.heapify(arr, n, i, query)\n for i in range(n - 1, 0, -1):\n # Swap\n arr[i], arr[0] = arr[0], arr[i]\n ranked += 1\n if ranked == k:\n break\n # Heapify root element\n self.heapify(arr, i, 0, query)\n\n def rerank(self, query: str, ranking: List[SearchResult]) -> List[SearchResult]:\n original_ranking = copy.deepcopy(ranking)\n self.total_compare = 0\n self.total_completion_tokens = 0\n self.total_prompt_tokens = 0\n if self.method == \"heapsort\":\n self.heapSort(ranking, query, self.k)\n ranking = list(reversed(ranking))\n\n # elif self.method == \"bubblesort\":\n # for i in range(k):\n # start_ind = len(ranking) - (self.num_child + 1)\n # end_ind = len(ranking)\n # while True:\n # if start_ind < i:\n # start_ind = i\n # output = self.compare(query, ranking[start_ind:end_ind])\n # try:\n # best_ind = self.CHARACTERS.index(output)\n # except ValueError:\n # best_ind = 0\n # if best_ind != 0:\n # ranking[start_ind], 
ranking[start_ind + best_ind] = ranking[start_ind + best_ind], ranking[start_ind]\n #\n # if start_ind == i:\n # break\n #\n # start_ind -= self.num_child\n # end_ind -= self.num_child\n elif self.method == \"bubblesort\":\n last_start = len(ranking) - (self.num_child + 1)\n\n for i in range(self.k):\n start_ind = last_start\n end_ind = last_start + (self.num_child + 1)\n is_change = False\n while True:\n if start_ind < i:\n start_ind = i\n output = self.compare(query, ranking[start_ind:end_ind])\n try:\n best_ind = self.CHARACTERS.index(output)\n except ValueError:\n best_ind = 0\n if best_ind != 0:\n ranking[start_ind], ranking[start_ind + best_ind] = ranking[start_ind + best_ind], ranking[start_ind]\n if not is_change:\n is_change = True\n if last_start != len(ranking) - (self.num_child + 1) \\\n and best_ind == len(ranking[start_ind:end_ind])-1:\n last_start += len(ranking[start_ind:end_ind])-1\n\n if start_ind == i:\n break\n\n if not is_change:\n last_start -= self.num_child\n\n start_ind -= self.num_child\n end_ind -= self.num_child\n\n else:\n raise NotImplementedError(f'Method {self.method} is not implemented.')\n\n results = []\n top_doc_ids = set()\n rank = 1\n\n for i, doc in enumerate(ranking[:self.k]):\n top_doc_ids.add(doc.docid)\n results.append(SearchResult(docid=doc.docid, score=-rank, text=None))\n rank += 1\n for doc in original_ranking:\n if doc.docid not in top_doc_ids:\n results.append(SearchResult(docid=doc.docid, score=-rank, text=None))\n rank += 1\n\n return results\n\n def truncate(self, text, length):\n return self.tokenizer.convert_tokens_to_string(self.tokenizer.tokenize(text)[:length])" }, { "identifier": "OpenAiSetwiseLlmRanker", "path": "rankers/setwise.py", "snippet": "class OpenAiSetwiseLlmRanker(SetwiseLlmRanker):\n def __init__(self, model_name_or_path, api_key, num_child=3, method='heapsort', k=10):\n self.llm = model_name_or_path\n self.tokenizer = tiktoken.encoding_for_model(model_name_or_path)\n self.num_child = num_child\n self.method = method\n self.k = k\n self.total_compare = 0\n self.total_prompt_tokens = 0\n self.total_completion_tokens = 0\n self.system_prompt = \"You are RankGPT, an intelligent assistant specialized in selecting the most relevant passage from a pool of passages based on their relevance to the query.\"\n openai.api_key = api_key\n\n def compare(self, query: str, docs: List):\n self.total_compare += 1\n passages = \"\\n\\n\".join([f'Passage {self.CHARACTERS[i]}: \"{doc.text}\"' for i, doc in enumerate(docs)])\n input_text = f'Given a query \"{query}\", which of the following passages is the most relevant one to the query?\\n\\n' \\\n + passages + '\\n\\nOutput only the passage label of the most relevant passage.'\n\n while True:\n try:\n response = openai.ChatCompletion.create(\n model=self.llm,\n messages=[\n {\"role\": \"system\", \"content\": self.system_prompt},\n {\"role\": \"user\", \"content\": input_text},\n ],\n temperature=0.0,\n request_timeout=15\n )\n\n self.total_completion_tokens += int(response['usage']['completion_tokens'])\n self.total_prompt_tokens += int(response['usage']['prompt_tokens'])\n\n output = response['choices'][0]['message']['content']\n matches = re.findall(r\"(Passage [A-Z])\", output, re.MULTILINE)\n if matches:\n output = matches[0][8]\n elif output.strip() in self.CHARACTERS:\n pass\n else:\n print(f\"Unexpected output: {output}\")\n output = \"A\"\n return output\n\n except openai.error.APIError as e:\n # Handle API error here, e.g. 
retry or log\n print(f\"OpenAI API returned an API Error: {e}\")\n time.sleep(5)\n continue\n except openai.error.APIConnectionError as e:\n # Handle connection error here\n print(f\"Failed to connect to OpenAI API: {e}\")\n time.sleep(5)\n continue\n except openai.error.RateLimitError as e:\n # Handle rate limit error (we recommend using exponential backoff)\n print(f\"OpenAI API request exceeded rate limit: {e}\")\n time.sleep(5)\n continue\n except openai.error.InvalidRequestError as e:\n # Handle invalid request error\n print(f\"OpenAI API request was invalid: {e}\")\n raise e\n except openai.error.AuthenticationError as e:\n # Handle authentication error\n print(f\"OpenAI API request failed authentication: {e}\")\n raise e\n except openai.error.Timeout as e:\n # Handle timeout error\n print(f\"OpenAI API request timed out: {e}\")\n time.sleep(5)\n continue\n except openai.error.ServiceUnavailableError as e:\n # Handle service unavailable error\n print(f\"OpenAI API request failed with a service unavailable error: {e}\")\n time.sleep(5)\n continue\n except Exception as e:\n print(f\"Unknown error: {e}\")\n raise e\n\n def truncate(self, text, length):\n return self.tokenizer.decode(self.tokenizer.encode(text)[:length])" }, { "identifier": "PairwiseLlmRanker", "path": "rankers/pairwise.py", "snippet": "class PairwiseLlmRanker(LlmRanker):\n def __init__(self, model_name_or_path,\n tokenizer_name_or_path,\n device,\n method=\"allpair\",\n batch_size=2,\n k=10,\n cache_dir=None\n ):\n self.device = device\n self.method = method\n self.batch_size = batch_size\n self.k = k\n self.prompt = \"\"\"Given a query \"{query}\", which of the following two passages is more relevant to the query?\n\nPassage A: \"{doc1}\"\n\nPassage B: \"{doc2}\"\n\nOutput Passage A or Passage B:\"\"\"\n\n self.config = AutoConfig.from_pretrained(model_name_or_path, cache_dir=cache_dir)\n if self.config.model_type == 't5':\n self.tokenizer = T5Tokenizer.from_pretrained(tokenizer_name_or_path\n if tokenizer_name_or_path is not None else\n model_name_or_path, cache_dir=cache_dir)\n self.llm = T5ForConditionalGeneration.from_pretrained(model_name_or_path,\n device_map='auto',\n torch_dtype=torch.float16 if device == 'cuda'\n else torch.float32,\n cache_dir=cache_dir)\n self.decoder_input_ids = self.tokenizer.encode(\"<pad> Passage\",\n return_tensors=\"pt\",\n add_special_tokens=False).to(self.llm.device)\n self.decoder_input_ids = self.decoder_input_ids.repeat(self.batch_size, 1)\n elif self.config.model_type == 'llama':\n self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, cache_dir=cache_dir)\n self.tokenizer.use_default_system_prompt = False\n if 'vicuna' and 'v1.5' in model_name_or_path:\n self.tokenizer.chat_template = \"{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = 'A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user\\\\'s questions.' 
%}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 %}{{ system_message }}{% endif %}{% if message['role'] == 'user' %}{{ ' USER: ' + message['content'].strip() }}{% elif message['role'] == 'assistant' %}{{ ' ASSISTANT: ' + message['content'].strip() + eos_token }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ ' ASSISTANT:' }}{% endif %}\"\n\n self.tokenizer.pad_token = \"[PAD]\"\n self.tokenizer.padding_side = \"left\"\n self.llm = AutoModelForCausalLM.from_pretrained(model_name_or_path,\n device_map='auto',\n torch_dtype=torch.float16 if device == 'cuda'\n else torch.float32,\n cache_dir=cache_dir).eval()\n else:\n raise NotImplementedError\n self.total_compare = 0\n self.total_completion_tokens = 0\n self.total_prompt_tokens = 0\n\n def compare(self, query: str, docs: List):\n self.total_compare += 1\n doc1, doc2 = docs[0], docs[1]\n input_texts = [self.prompt.format(query=query, doc1=doc1, doc2=doc2),\n self.prompt.format(query=query, doc1=doc2, doc2=doc1)]\n if self.config.model_type == 't5':\n input_ids = self.tokenizer(input_texts,\n padding='longest',\n return_tensors=\"pt\").input_ids.to(self.llm.device)\n\n self.total_prompt_tokens += input_ids.shape[0] * input_ids.shape[1]\n\n output_ids = self.llm.generate(input_ids,\n decoder_input_ids=self.decoder_input_ids,\n max_new_tokens=2)\n\n self.total_completion_tokens += output_ids.shape[0] * output_ids.shape[1]\n\n output = self.tokenizer.batch_decode(output_ids, skip_special_tokens=True)\n\n elif self.config.model_type == 'llama':\n conversation0 = [{\"role\": \"user\", \"content\": input_texts[0]}]\n conversation1 = [{\"role\": \"user\", \"content\": input_texts[1]}]\n\n prompt0 = self.tokenizer.apply_chat_template(conversation0, tokenize=False, add_generation_prompt=True)\n prompt0 += \" Passage:\"\n prompt1 = self.tokenizer.apply_chat_template(conversation1, tokenize=False, add_generation_prompt=True)\n prompt1 += \" Passage:\"\n\n input_ids = self.tokenizer([prompt0, prompt1], return_tensors=\"pt\").input_ids.to(self.device)\n self.total_prompt_tokens += input_ids.shape[0] * input_ids.shape[1]\n\n output_ids = self.llm.generate(input_ids,\n do_sample=False,\n temperature=0.0,\n top_p=None,\n max_new_tokens=1)\n\n self.total_completion_tokens += output_ids.shape[0] * output_ids.shape[1]\n\n output0 = self.tokenizer.decode(output_ids[0][input_ids.shape[1]:],\n skip_special_tokens=True).strip().upper()\n output1 = self.tokenizer.decode(output_ids[1][input_ids.shape[1]:],\n skip_special_tokens=True).strip().upper()\n return [f'Passage {output0}', f'Passage {output1}']\n else:\n raise NotImplementedError\n\n return output\n\n def heapify(self, arr, n, i):\n # Find largest among root and children\n largest = i\n l = 2 * i + 1\n r = 2 * i + 2\n if l < n and arr[l] > arr[i]:\n largest = l\n\n if r < n and arr[r] > arr[largest]:\n largest = r\n\n # If root is not largest, swap with largest and continue heapifying\n if largest != i:\n arr[i], arr[largest] = arr[largest], arr[i]\n self.heapify(arr, n, largest)\n\n def heapSort(self, arr, k):\n n = len(arr)\n ranked = 0\n # Build max heap\n for i in range(n // 2, -1, -1):\n self.heapify(arr, n, i)\n for i in range(n - 1, 0, -1):\n # Swap\n arr[i], arr[0] = arr[0], arr[i]\n ranked += 1\n if ranked == k:\n break\n # Heapify root element\n self.heapify(arr, i, 0)\n\n def rerank(self, query: str, ranking: 
List[SearchResult]) -> List[SearchResult]:\n original_ranking = copy.deepcopy(ranking)\n self.total_compare = 0\n self.total_completion_tokens = 0\n self.total_prompt_tokens = 0\n if self.method == \"allpair\":\n doc_pairs = list(combinations(ranking, 2))\n allpairs = []\n for doc1, doc2 in tqdm(doc_pairs):\n allpairs.append(self.prompt.format(query=query, doc1=doc1.text, doc2=doc2.text))\n allpairs.append(self.prompt.format(query=query, doc1=doc2.text, doc2=doc1.text))\n\n allpairs_dataset = Text2TextGenerationDataset(allpairs, self.tokenizer)\n\n loader = DataLoader(\n allpairs_dataset,\n batch_size=self.batch_size,\n collate_fn=DataCollatorWithPadding(\n self.tokenizer,\n max_length=512,\n padding='longest',\n ),\n shuffle=False,\n drop_last=False,\n num_workers=4\n )\n\n outputs = []\n for batch_inputs in tqdm(loader):\n self.total_compare += 1\n self.total_prompt_tokens += batch_inputs['input_ids'].shape[0] * batch_inputs['input_ids'].shape[1]\n\n batch_outputs = self.llm.generate(batch_inputs['input_ids'].to(self.llm.device),\n decoder_input_ids=self.decoder_input_ids\n if self.decoder_input_ids.shape[0] == len(batch_inputs['input_ids'])\n else self.decoder_input_ids[:len(batch_inputs['input_ids']), :], # last batch might be smaller\n max_new_tokens=2)\n self.total_completion_tokens += batch_outputs.shape[0] * batch_outputs.shape[1]\n outputs.extend(batch_outputs.cpu().numpy())\n\n outputs = self.tokenizer.batch_decode(outputs, skip_special_tokens=True)\n scores = defaultdict(float)\n for i in range(0, len(outputs), 2):\n doc1, doc2 = doc_pairs[i//2]\n output1 = outputs[i]\n output2 = outputs[i + 1]\n if output1 == \"Passage A\" and output2 == \"Passage B\":\n scores[doc1.docid] += 1\n elif output1 == \"Passage B\" and output2 == \"Passage A\":\n scores[doc2.docid] += 1\n else: # conflict\n scores[doc1.docid] += 0.5\n scores[doc2.docid] += 0.5\n\n ranking = sorted([SearchResult(docid=docid, score=score, text=None) for docid, score in scores.items()],\n key=lambda x: x.score, reverse=True)\n\n elif self.method == \"heapsort\":\n class ComparableDoc:\n def __init__(self, docid, text, ranker):\n self.docid = docid\n self.text = text\n self.ranker = ranker\n\n def __gt__(self, other):\n out = self.ranker.compare(query, [self.text, other.text])\n if out[0] == \"Passage A\" and out[1] == \"Passage B\":\n return True\n else:\n return False\n\n arr = [ComparableDoc(docid=doc.docid, text=doc.text, ranker=self) for doc in ranking]\n self.heapSort(arr, self.k)\n ranking = [SearchResult(docid=doc.docid, score=-i, text=None) for i, doc in enumerate(reversed(arr))]\n\n #\n # elif self.method == \"bubblesort\":\n # k = min(k, len(ranking))\n # for i in range(k):\n # current_ind = len(ranking) - 1\n # while True:\n # if current_ind == i:\n # break\n # doc1 = ranking[current_ind]\n # doc2 = ranking[current_ind - 1]\n # output = self.compare(query, [doc1.text, doc2.text])\n # if output[0] == \"Passage A\" and output[1] == \"Passage B\":\n # ranking[current_ind - 1], ranking[current_ind] = ranking[current_ind], ranking[current_ind - 1]\n # current_ind -= 1\n elif self.method == \"bubblesort\":\n k = min(self.k, len(ranking))\n\n last_end = len(ranking) - 1\n for i in range(k):\n current_ind = last_end\n is_change = False\n while True:\n if current_ind <= i:\n break\n doc1 = ranking[current_ind]\n doc2 = ranking[current_ind - 1]\n output = self.compare(query, [doc1.text, doc2.text])\n if output[0] == \"Passage A\" and output[1] == \"Passage B\":\n ranking[current_ind - 1], ranking[current_ind] = 
ranking[current_ind], ranking[current_ind - 1]\n\n if not is_change:\n is_change = True\n if last_end != len(ranking) - 1: # skip unchanged pairs at the bottom\n last_end += 1\n if not is_change:\n last_end -= 1\n current_ind -= 1\n else:\n raise NotImplementedError(f'Method {self.method} is not implemented.')\n\n results = []\n top_doc_ids = set()\n rank = 1\n for i, doc in enumerate(ranking[:self.k]):\n top_doc_ids.add(doc.docid)\n results.append(SearchResult(docid=doc.docid, score=-rank, text=None))\n rank += 1\n for doc in original_ranking:\n if doc.docid not in top_doc_ids:\n results.append(SearchResult(docid=doc.docid, score=-rank, text=None))\n rank += 1\n return results\n\n def truncate(self, text, length):\n return self.tokenizer.convert_tokens_to_string(self.tokenizer.tokenize(text)[:length])" }, { "identifier": "DuoT5LlmRanker", "path": "rankers/pairwise.py", "snippet": "class DuoT5LlmRanker(PairwiseLlmRanker):\n def compare(self, query: str, docs: List[str]) -> bool:\n self.total_compare += 1\n self.prompt = 'Query: {query} Document0: {doc1} Document1: {doc2} Relevant:'\n\n inputs = [self.prompt.format(query=query, doc1=docs[0], doc2=docs[1]),\n self.prompt.format(query=query, doc1=docs[1], doc2=docs[0])]\n inputs = self.tokenizer(inputs, padding=True, truncation=True, return_tensors=\"pt\").to(self.llm.device)\n decode_ids = torch.full((2, 1),\n self.llm.config.decoder_start_token_id,\n dtype=torch.long, device=self.llm.device)\n\n self.total_prompt_tokens += inputs['input_ids'].shape[0] * inputs['input_ids'].shape[1]\n\n with torch.no_grad():\n logits = self.llm(input_ids=inputs['input_ids'],\n attention_mask=inputs['attention_mask'],\n decoder_input_ids=decode_ids).logits\n # 6136 and 1176 are the indexes of the tokens false and true in T5.\n batch_scores = logits[:, 0, [6136, 1176]]\n batch_scores = torch.nn.functional.softmax(batch_scores, dim=1)\n batch_probs = batch_scores[:, 1]\n return batch_probs[0] > batch_probs[1]\n\n def rerank(self, query: str, ranking: List[SearchResult]) -> List[SearchResult]:\n original_ranking = copy.deepcopy(ranking)\n self.total_compare = 0\n self.total_completion_tokens = 0\n self.total_prompt_tokens = 0\n if self.method == \"heapsort\":\n class ComparableDoc:\n def __init__(self, docid, text, ranker):\n self.docid = docid\n self.text = text\n self.ranker = ranker\n\n def __gt__(self, other):\n return self.ranker.compare(query, [self.text, other.text])\n arr = [ComparableDoc(docid=doc.docid, text=doc.text, ranker=self) for doc in ranking]\n self.heapSort(arr, self.k)\n ranking = [SearchResult(docid=doc.docid, score=-i, text=None) for i, doc in enumerate(reversed(arr))]\n\n else:\n raise NotImplementedError(f'Method {self.method} is not implemented.')\n\n results = []\n top_doc_ids = set()\n rank = 1\n for i, doc in enumerate(ranking[:self.k]):\n top_doc_ids.add(doc.docid)\n results.append(SearchResult(docid=doc.docid, score=-rank, text=None))\n rank += 1\n for doc in original_ranking:\n if doc.docid not in top_doc_ids:\n results.append(SearchResult(docid=doc.docid, score=-rank, text=None))\n rank += 1\n return results" }, { "identifier": "OpenAiPairwiseLlmRanker", "path": "rankers/pairwise.py", "snippet": "class OpenAiPairwiseLlmRanker(PairwiseLlmRanker):\n def __init__(self,\n model_name_or_path,\n api_key,\n method=\"heapsort\",\n batch_size=2,\n k=10):\n self.llm = model_name_or_path\n self.tokenizer = tiktoken.encoding_for_model(model_name_or_path)\n self.method = method\n self.k = k\n self.total_compare = 0\n self.total_prompt_tokens = 
0\n self.total_completion_tokens = 0\n self.CHARACTERS = [\"A\", \"B\"]\n self.system_prompt = \"You are RankGPT, an intelligent assistant specialized in selecting the most relevant passage from a pair of passages based on their relevance to the query.\"\n self.prompt = \"\"\"Given a query \"{query}\", which of the following two passages is more relevant to the query?\n \nPassage A: \"{doc1}\"\n\nPassage B: \"{doc2}\"\n\nOutput Passage A or Passage B:\"\"\"\n openai.api_key = api_key\n\n def _get_response(self, input_text):\n while True:\n try:\n response = openai.ChatCompletion.create(\n model=self.llm,\n messages=[\n {\"role\": \"system\", \"content\": self.system_prompt},\n {\"role\": \"user\", \"content\": input_text},\n ],\n temperature=0.0,\n request_timeout=15\n )\n self.total_completion_tokens += int(response['usage']['completion_tokens'])\n self.total_prompt_tokens += int(response['usage']['prompt_tokens'])\n\n output = response['choices'][0]['message']['content']\n matches = re.findall(r\"(Passage [A-B])\", output, re.MULTILINE)\n if matches:\n output = matches[0][8]\n elif output.strip() in self.CHARACTERS:\n pass\n else:\n print(f\"Unexpected output: {output}\")\n output = \"A\"\n return output\n\n except openai.error.APIError as e:\n # Handle API error here, e.g. retry or log\n print(f\"OpenAI API returned an API Error: {e}\")\n time.sleep(5)\n continue\n except openai.error.APIConnectionError as e:\n # Handle connection error here\n print(f\"Failed to connect to OpenAI API: {e}\")\n time.sleep(5)\n continue\n except openai.error.RateLimitError as e:\n # Handle rate limit error (we recommend using exponential backoff)\n print(f\"OpenAI API request exceeded rate limit: {e}\")\n time.sleep(5)\n continue\n except openai.error.InvalidRequestError as e:\n # Handle invalid request error\n print(f\"OpenAI API request was invalid: {e}\")\n raise e\n except openai.error.AuthenticationError as e:\n # Handle authentication error\n print(f\"OpenAI API request failed authentication: {e}\")\n raise e\n except openai.error.Timeout as e:\n # Handle timeout error\n print(f\"OpenAI API request timed out: {e}\")\n time.sleep(5)\n continue\n except openai.error.ServiceUnavailableError as e:\n # Handle service unavailable error\n print(f\"OpenAI API request failed with a service unavailable error: {e}\")\n time.sleep(5)\n continue\n except Exception as e:\n print(f\"Unknown error: {e}\")\n raise e\n\n def compare(self, query: str, docs: List):\n self.total_compare += 1\n doc1, doc2 = docs[0], docs[1]\n input_texts = [self.prompt.format(query=query, doc1=doc1, doc2=doc2),\n self.prompt.format(query=query, doc1=doc2, doc2=doc1)]\n\n return [f'Passage {self._get_response(input_texts[0])}', f'Passage {self._get_response(input_texts[1])}']\n\n def truncate(self, text, length):\n return self.tokenizer.decode(self.tokenizer.encode(text)[:length])" }, { "identifier": "OpenAiListwiseLlmRanker", "path": "rankers/listwise.py", "snippet": "class OpenAiListwiseLlmRanker(LlmRanker):\n def __init__(self, model_name_or_path, api_key, window_size, step_size, num_repeat):\n self.llm = model_name_or_path\n self.tokenizer = tiktoken.encoding_for_model(model_name_or_path)\n self.window_size = window_size\n self.step_size = step_size\n self.num_repeat = num_repeat\n openai.api_key = api_key\n self.total_compare = 0\n self.total_prompt_tokens = 0\n self.total_completion_tokens = 0\n\n def compare(self, query: str, docs: List):\n self.total_compare += 1\n messages = create_permutation_instruction_chat(query, docs, 
self.llm)\n while True:\n try:\n completion = openai.ChatCompletion.create(\n model=self.llm,\n messages=messages,\n temperature=0.0,\n request_timeout=15)\n self.total_completion_tokens += int(completion['usage']['completion_tokens'])\n self.total_prompt_tokens += int(completion['usage']['prompt_tokens'])\n return completion['choices'][0]['message']['content']\n except Exception as e:\n print(str(e))\n if \"This model's maximum context length is\" in str(e):\n print('reduce_length')\n return 'ERROR::reduce_length'\n\n def rerank(self, query: str, ranking: List[SearchResult]) -> List[SearchResult]:\n self.total_compare = 0\n self.total_prompt_tokens = 0\n self.total_completion_tokens = 0\n\n for _ in range(self.num_repeat):\n ranking = copy.deepcopy(ranking)\n end_pos = len(ranking)\n start_pos = end_pos - self.window_size\n while start_pos >= 0:\n start_pos = max(start_pos, 0)\n result = self.compare(query, ranking[start_pos: end_pos])\n ranking = receive_permutation(ranking, result, start_pos, end_pos)\n end_pos = end_pos - self.step_size\n start_pos = start_pos - self.step_size\n\n for i, doc in enumerate(ranking):\n doc.score = -i\n return ranking\n\n def truncate(self, text, length):\n return self.tokenizer.decode(self.tokenizer.encode(text)[:length])" }, { "identifier": "ListwiseLlmRanker", "path": "rankers/listwise.py", "snippet": "class ListwiseLlmRanker(OpenAiListwiseLlmRanker):\n CHARACTERS = [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\",\n \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\", \"U\", \"V\",\n \"W\"] # \"Passage X\" and \"Passage Y\" will be tokenized into 3 tokens, so we dont use for now\n\n def __init__(self, model_name_or_path, tokenizer_name_or_path, device, window_size, step_size,\n scoring='generation', num_repeat=1, cache_dir=None):\n\n self.scoring = scoring\n self.device = device\n self.window_size = window_size\n self.step_size = step_size\n self.num_repeat = num_repeat\n self.config = AutoConfig.from_pretrained(model_name_or_path, cache_dir=cache_dir)\n\n if self.config.model_type == 't5':\n self.tokenizer = T5Tokenizer.from_pretrained(tokenizer_name_or_path\n if tokenizer_name_or_path is not None else\n model_name_or_path, cache_dir=cache_dir)\n self.llm = T5ForConditionalGeneration.from_pretrained(model_name_or_path,\n device_map='auto',\n torch_dtype=torch.float16 if device == 'cuda'\n else torch.float32,\n cache_dir=cache_dir)\n\n self.decoder_input_ids = self.tokenizer.encode(\"<pad> Passage\",\n return_tensors=\"pt\",\n add_special_tokens=False).to(self.device) if self.tokenizer else None\n self.target_token_ids = self.tokenizer.batch_encode_plus([f'<pad> Passage {self.CHARACTERS[i]}'\n for i in range(len(self.CHARACTERS))],\n return_tensors=\"pt\",\n add_special_tokens=False,\n padding=True).input_ids[:, -1]\n elif self.config.model_type == 'llama':\n self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, cache_dir=cache_dir)\n self.tokenizer.use_default_system_prompt = False\n if 'vicuna' and 'v1.5' in model_name_or_path:\n self.tokenizer.chat_template = \"{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = 'A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user\\\\'s questions.' 
%}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 %}{{ system_message }}{% endif %}{% if message['role'] == 'user' %}{{ ' USER: ' + message['content'].strip() }}{% elif message['role'] == 'assistant' %}{{ ' ASSISTANT: ' + message['content'].strip() + eos_token }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ ' ASSISTANT:' }}{% endif %}\"\n\n self.llm = AutoModelForCausalLM.from_pretrained(model_name_or_path,\n device_map='auto',\n torch_dtype=torch.float16 if device == 'cuda'\n else torch.float32,\n cache_dir=cache_dir).eval()\n else:\n raise NotImplementedError\n\n def compare(self, query: str, docs: List):\n self.total_compare += 1\n if self.scoring == 'generation':\n if self.config.model_type == 't5':\n input_text = create_permutation_instruction_complete(query, docs)\n input_ids = self.tokenizer(input_text, return_tensors=\"pt\", truncation=True).input_ids.to(self.device)\n self.total_prompt_tokens += input_ids.shape[1]\n\n output_ids = self.llm.generate(input_ids)[0]\n self.total_completion_tokens += output_ids.shape[0]\n output = self.tokenizer.decode(output_ids,\n skip_special_tokens=True).strip()\n elif self.config.model_type == 'llama':\n input_text = create_permutation_instruction_chat(query, docs, model_name=None)\n input_ids = self.tokenizer.apply_chat_template(input_text, return_tensors=\"pt\",\n add_generation_prompt=True).to(self.device)\n\n self.total_prompt_tokens += input_ids.shape[1]\n\n output_ids = self.llm.generate(input_ids)[0]\n self.total_completion_tokens += output_ids.shape[0]\n output = self.tokenizer.decode(output_ids[input_ids.shape[1]:],\n skip_special_tokens=True).strip()\n\n elif self.scoring == 'likelihood':\n passages = \"\\n\\n\".join([f'Passage {self.CHARACTERS[i]}: \"{doc.text}\"' for i, doc in enumerate(docs)])\n input_text = f'Given a query \"{query}\", which of the following passages is the most relevant one to the query?\\n\\n' \\\n + passages + '\\n\\nOutput only the passage label of the most relevant passage:'\n\n input_ids = self.tokenizer(input_text, return_tensors=\"pt\").input_ids.to(self.device)\n self.total_prompt_tokens += input_ids.shape[1]\n\n with torch.no_grad():\n logits = self.llm(input_ids=input_ids, decoder_input_ids=self.decoder_input_ids).logits[0][-1]\n distributions = torch.softmax(logits, dim=0)\n scores = distributions[self.target_token_ids[:len(docs)]]\n ranked = sorted(zip([f\"[{str(i+1)}]\" for i in range(len(docs))], scores), key=lambda x: x[1], reverse=True)\n output = '>'.join(ranked[i][0] for i in range(len(ranked)))\n\n return output\n\n def truncate(self, text, length):\n return self.tokenizer.convert_tokens_to_string(self.tokenizer.tokenize(text)[:length])" } ]
import logging import ir_datasets import argparse import sys import json import time import random from pyserini.search.lucene import LuceneSearcher from pyserini.search._base import get_topics from rankers.rankers import SearchResult from rankers.pointwise import PointwiseLlmRanker, MonoT5LlmRanker from rankers.setwise import SetwiseLlmRanker, OpenAiSetwiseLlmRanker from rankers.pairwise import PairwiseLlmRanker, DuoT5LlmRanker, OpenAiPairwiseLlmRanker from rankers.listwise import OpenAiListwiseLlmRanker, ListwiseLlmRanker from tqdm import tqdm
13,975
random.seed(929) logger = logging.getLogger(__name__) def parse_args(parser, commands): # Divide argv by commands split_argv = [[]] for c in sys.argv[1:]: if c in commands.choices: split_argv.append([c]) else: split_argv[-1].append(c) # Initialize namespace args = argparse.Namespace() for c in commands.choices: setattr(args, c, None) # Parse each command parser.parse_args(split_argv[0], namespace=args) # Without command for argv in split_argv[1:]: # Commands n = argparse.Namespace() setattr(args, argv[0], n) parser.parse_args(argv, namespace=n) return args def write_run_file(path, results, tag): with open(path, 'w') as f: for qid, _, ranking in results: rank = 1 for doc in ranking: docid = doc.docid score = doc.score f.write(f"{qid}\tQ0\t{docid}\t{rank}\t{score}\t{tag}\n") rank += 1 def main(args): if args.pointwise: if 'monot5' in args.run.model_name_or_path: ranker = MonoT5LlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pointwise.method, batch_size=args.pointwise.batch_size) else: ranker = PointwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pointwise.method, batch_size=args.pointwise.batch_size) elif args.setwise: if args.run.openai_key: ranker = OpenAiSetwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, api_key=args.run.openai_key, num_child=args.setwise.num_child, method=args.setwise.method, k=args.setwise.k) else: ranker = SetwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, num_child=args.setwise.num_child, scoring=args.run.scoring, method=args.setwise.method, num_permutation=args.setwise.num_permutation, k=args.setwise.k) elif args.pairwise: if args.pairwise.method != 'allpair': args.pairwise.batch_size = 2 logger.info(f'Setting batch_size to 2.') if args.run.openai_key: ranker = OpenAiPairwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, api_key=args.run.openai_key, method=args.pairwise.method, k=args.pairwise.k) elif 'duot5' in args.run.model_name_or_path: ranker = DuoT5LlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pairwise.method, batch_size=args.pairwise.batch_size, k=args.pairwise.k) else: ranker = PairwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pairwise.method, batch_size=args.pairwise.batch_size, k=args.pairwise.k) elif args.listwise: if args.run.openai_key: ranker = OpenAiListwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, api_key=args.run.openai_key, window_size=args.listwise.window_size, step_size=args.listwise.step_size, num_repeat=args.listwise.num_repeat) else:
random.seed(929) logger = logging.getLogger(__name__) def parse_args(parser, commands): # Divide argv by commands split_argv = [[]] for c in sys.argv[1:]: if c in commands.choices: split_argv.append([c]) else: split_argv[-1].append(c) # Initialize namespace args = argparse.Namespace() for c in commands.choices: setattr(args, c, None) # Parse each command parser.parse_args(split_argv[0], namespace=args) # Without command for argv in split_argv[1:]: # Commands n = argparse.Namespace() setattr(args, argv[0], n) parser.parse_args(argv, namespace=n) return args def write_run_file(path, results, tag): with open(path, 'w') as f: for qid, _, ranking in results: rank = 1 for doc in ranking: docid = doc.docid score = doc.score f.write(f"{qid}\tQ0\t{docid}\t{rank}\t{score}\t{tag}\n") rank += 1 def main(args): if args.pointwise: if 'monot5' in args.run.model_name_or_path: ranker = MonoT5LlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pointwise.method, batch_size=args.pointwise.batch_size) else: ranker = PointwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pointwise.method, batch_size=args.pointwise.batch_size) elif args.setwise: if args.run.openai_key: ranker = OpenAiSetwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, api_key=args.run.openai_key, num_child=args.setwise.num_child, method=args.setwise.method, k=args.setwise.k) else: ranker = SetwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, num_child=args.setwise.num_child, scoring=args.run.scoring, method=args.setwise.method, num_permutation=args.setwise.num_permutation, k=args.setwise.k) elif args.pairwise: if args.pairwise.method != 'allpair': args.pairwise.batch_size = 2 logger.info(f'Setting batch_size to 2.') if args.run.openai_key: ranker = OpenAiPairwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, api_key=args.run.openai_key, method=args.pairwise.method, k=args.pairwise.k) elif 'duot5' in args.run.model_name_or_path: ranker = DuoT5LlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pairwise.method, batch_size=args.pairwise.batch_size, k=args.pairwise.k) else: ranker = PairwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pairwise.method, batch_size=args.pairwise.batch_size, k=args.pairwise.k) elif args.listwise: if args.run.openai_key: ranker = OpenAiListwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, api_key=args.run.openai_key, window_size=args.listwise.window_size, step_size=args.listwise.step_size, num_repeat=args.listwise.num_repeat) else:
ranker = ListwiseLlmRanker(model_name_or_path=args.run.model_name_or_path,
9
2023-10-14 01:39:38+00:00
16k
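The OpenAiListwiseLlmRanker.rerank snippet in the record above reranks by sliding a window from the tail of the candidate list toward the head, asking the LLM to permute each window and shifting by step_size. Below is a minimal Python sketch of that sliding-window loop; the LLM permutation call is replaced by a hypothetical order_window stand-in (here it just sorts by text length), so it only illustrates the window mechanics, not the actual prompting.

from dataclasses import dataclass
from typing import List


@dataclass
class Doc:
    docid: str
    text: str
    score: float = 0.0


def order_window(query: str, window: List[Doc]) -> List[Doc]:
    # Stand-in for the LLM permutation step; a real ranker would prompt the
    # model with the query and the window's passages and parse its ordering.
    return sorted(window, key=lambda d: len(d.text))


def sliding_window_rerank(query: str, ranking: List[Doc],
                          window_size: int = 4, step_size: int = 2) -> List[Doc]:
    ranking = list(ranking)
    end_pos = len(ranking)
    start_pos = end_pos - window_size
    while start_pos >= 0:
        start_pos = max(start_pos, 0)
        # Re-order only the current window; everything outside it stays fixed.
        ranking[start_pos:end_pos] = order_window(query, ranking[start_pos:end_pos])
        end_pos -= step_size
        start_pos -= step_size
    # Mirror the convention in the snippet above: earlier position -> higher score.
    for i, doc in enumerate(ranking):
        doc.score = -i
    return ranking


if __name__ == "__main__":
    docs = [Doc(f"d{i}", "x" * (10 - i)) for i in range(8)]
    for d in sliding_window_rerank("example query", docs):
        print(d.docid, d.score)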
amazon-science/tabsyn
baselines/tabddpm/train.py
[ { "identifier": "make_dataset", "path": "utils_train.py", "snippet": "def make_dataset(\n data_path: str,\n T: src.Transformations,\n task_type,\n change_val: bool,\n concat = True,\n):\n\n # classification\n if task_type == 'binclass' or task_type == 'multiclass':\n X_cat = {} if os.path.exists(os.path.join(data_path, 'X_cat_train.npy')) else None\n X_num = {} if os.path.exists(os.path.join(data_path, 'X_num_train.npy')) else None\n y = {} if os.path.exists(os.path.join(data_path, 'y_train.npy')) else None\n\n for split in ['train', 'test']:\n X_num_t, X_cat_t, y_t = src.read_pure_data(data_path, split)\n if X_num is not None:\n X_num[split] = X_num_t\n if X_cat is not None:\n if concat:\n X_cat_t = concat_y_to_X(X_cat_t, y_t)\n X_cat[split] = X_cat_t \n if y is not None:\n y[split] = y_t\n else:\n # regression\n X_cat = {} if os.path.exists(os.path.join(data_path, 'X_cat_train.npy')) else None\n X_num = {} if os.path.exists(os.path.join(data_path, 'X_num_train.npy')) else None\n y = {} if os.path.exists(os.path.join(data_path, 'y_train.npy')) else None\n\n for split in ['train', 'test']:\n X_num_t, X_cat_t, y_t = src.read_pure_data(data_path, split)\n\n if X_num is not None:\n if concat:\n X_num_t = concat_y_to_X(X_num_t, y_t)\n X_num[split] = X_num_t\n if X_cat is not None:\n X_cat[split] = X_cat_t\n if y is not None:\n y[split] = y_t\n\n info = src.load_json(os.path.join(data_path, 'info.json'))\n\n D = src.Dataset(\n X_num,\n X_cat,\n y,\n y_info={},\n task_type=src.TaskType(info['task_type']),\n n_classes=info.get('n_classes')\n )\n\n if change_val:\n D = src.change_val(D)\n\n # def categorical_to_idx(feature):\n # unique_categories = np.unique(feature)\n # idx_mapping = {category: index for index, category in enumerate(unique_categories)}\n # idx_feature = np.array([idx_mapping[category] for category in feature])\n # return idx_feature\n\n # for split in ['train', 'val', 'test']:\n # D.y[split] = categorical_to_idx(D.y[split].squeeze(1))\n\n return src.transform_dataset(D, T, None)" }, { "identifier": "update_ema", "path": "utils_train.py", "snippet": "def update_ema(target_params, source_params, rate=0.999):\n \"\"\"\n Update target parameters to be closer to those of source parameters using\n an exponential moving average.\n :param target_params: the target parameter sequence.\n :param source_params: the source parameter sequence.\n :param rate: the EMA rate (closer to 1 means slower).\n \"\"\"\n for target, source in zip(target_params, source_params):\n target.detach().mul_(rate).add_(source.detach(), alpha=1 - rate)" }, { "identifier": "MLPDiffusion", "path": "baselines/tabddpm/models/modules.py", "snippet": "class MLPDiffusion(nn.Module):\n def __init__(self, d_in, num_classes, is_y_cond, rtdl_params, dim_t = 1024):\n super().__init__()\n self.dim_t = dim_t\n self.num_classes = num_classes\n self.is_y_cond = is_y_cond\n\n # d0 = rtdl_params['d_layers'][0]\n\n rtdl_params['d_in'] = dim_t\n rtdl_params['d_out'] = d_in\n\n self.mlp = MLP.make_baseline(**rtdl_params)\n\n if self.num_classes > 0 and is_y_cond:\n self.label_emb = nn.Embedding(self.num_classes, dim_t)\n elif self.num_classes == 0 and is_y_cond:\n self.label_emb = nn.Linear(1, dim_t)\n \n self.proj = nn.Linear(d_in, dim_t)\n self.time_embed = nn.Sequential(\n nn.Linear(dim_t, dim_t),\n nn.SiLU(),\n nn.Linear(dim_t, dim_t)\n )\n \n def forward(self, x, timesteps, y=None):\n emb = self.time_embed(timestep_embedding(timesteps, self.dim_t))\n if self.is_y_cond and y is not None:\n if self.num_classes > 0:\n y = 
y.squeeze()\n else:\n y = y.resize(y.size(0), 1).float()\n emb += F.silu(self.label_emb(y))\n x = self.proj(x) + emb\n\n return self.mlp(x)" }, { "identifier": "GaussianMultinomialDiffusion", "path": "baselines/tabddpm/models/gaussian_multinomial_distribution.py", "snippet": "class GaussianMultinomialDiffusion(torch.nn.Module):\n def __init__(\n self,\n num_classes: np.array,\n num_numerical_features: int,\n denoise_fn,\n num_timesteps=1000,\n gaussian_loss_type='mse',\n gaussian_parametrization='eps',\n multinomial_loss_type='vb_stochastic',\n parametrization='x0',\n scheduler='cosine',\n device=torch.device('cpu')\n ):\n\n super(GaussianMultinomialDiffusion, self).__init__()\n assert multinomial_loss_type in ('vb_stochastic', 'vb_all')\n assert parametrization in ('x0', 'direct')\n\n if multinomial_loss_type == 'vb_all':\n print('Computing the loss using the bound on _all_ timesteps.'\n ' This is expensive both in terms of memory and computation.')\n\n self.num_numerical_features = num_numerical_features\n self.num_classes = num_classes # it as a vector [K1, K2, ..., Km]\n self.num_classes_expanded = torch.from_numpy(\n np.concatenate([num_classes[i].repeat(num_classes[i]) for i in range(len(num_classes))])\n ).to(device)\n\n self.slices_for_classes = [np.arange(self.num_classes[0])]\n offsets = np.cumsum(self.num_classes)\n for i in range(1, len(offsets)):\n self.slices_for_classes.append(np.arange(offsets[i - 1], offsets[i]))\n self.offsets = torch.from_numpy(np.append([0], offsets)).to(device)\n\n self._denoise_fn = denoise_fn\n self.gaussian_loss_type = gaussian_loss_type\n self.gaussian_parametrization = gaussian_parametrization\n self.multinomial_loss_type = multinomial_loss_type\n self.num_timesteps = num_timesteps\n self.parametrization = parametrization\n self.scheduler = scheduler\n\n alphas = 1. - get_named_beta_schedule(scheduler, num_timesteps)\n alphas = torch.tensor(alphas.astype('float64')) # alpha2_t\n betas = 1. 
- alphas # beta2_t\n\n log_alpha = np.log(alphas)\n log_cumprod_alpha = np.cumsum(log_alpha)\n\n log_1_min_alpha = log_1_min_a(log_alpha)\n log_1_min_cumprod_alpha = log_1_min_a(log_cumprod_alpha)\n\n alphas_cumprod = np.cumprod(alphas, axis=0) # tilde_alpha2_t\n alphas_cumprod_prev = torch.tensor(np.append(1.0, alphas_cumprod[:-1])) # tilde_alpha2_{t-1}\n alphas_cumprod_next = torch.tensor(np.append(alphas_cumprod[1:], 0.0)) # tilde_alpha2_{t+1}\n sqrt_alphas_cumprod = np.sqrt(alphas_cumprod) # tilde_alpha_t\n sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - alphas_cumprod) # tilde_beta_t\n sqrt_recip_alphas_cumprod = np.sqrt(1.0 / alphas_cumprod) # sqrt(1 / tilde_alpha_t)\n sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / alphas_cumprod - 1) # sqrt(tilde_beta_t / tilde_alpha_t )\n\n # Gaussian diffusion\n\n self.posterior_variance = (\n betas * (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod)\n )\n self.posterior_log_variance_clipped = torch.from_numpy(\n np.log(np.append(self.posterior_variance[1], self.posterior_variance[1:]))\n ).float().to(device)\n self.posterior_mean_coef1 = (\n betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)\n ).float().to(device)\n self.posterior_mean_coef2 = (\n (1.0 - alphas_cumprod_prev)\n * np.sqrt(alphas.numpy())\n / (1.0 - alphas_cumprod)\n ).float().to(device)\n\n assert log_add_exp(log_alpha, log_1_min_alpha).abs().sum().item() < 1.e-5\n assert log_add_exp(log_cumprod_alpha, log_1_min_cumprod_alpha).abs().sum().item() < 1e-5\n assert (np.cumsum(log_alpha) - log_cumprod_alpha).abs().sum().item() < 1.e-5\n\n # Convert to float32 and register buffers.\n self.register_buffer('alphas', alphas.float().to(device))\n self.register_buffer('log_alpha', log_alpha.float().to(device))\n self.register_buffer('log_1_min_alpha', log_1_min_alpha.float().to(device))\n self.register_buffer('log_1_min_cumprod_alpha', log_1_min_cumprod_alpha.float().to(device))\n self.register_buffer('log_cumprod_alpha', log_cumprod_alpha.float().to(device))\n self.register_buffer('alphas_cumprod', alphas_cumprod.float().to(device))\n self.register_buffer('alphas_cumprod_prev', alphas_cumprod_prev.float().to(device))\n self.register_buffer('alphas_cumprod_next', alphas_cumprod_next.float().to(device))\n self.register_buffer('sqrt_alphas_cumprod', sqrt_alphas_cumprod.float().to(device))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', sqrt_one_minus_alphas_cumprod.float().to(device))\n self.register_buffer('sqrt_recip_alphas_cumprod', sqrt_recip_alphas_cumprod.float().to(device))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', sqrt_recipm1_alphas_cumprod.float().to(device))\n\n self.register_buffer('Lt_history', torch.zeros(num_timesteps))\n self.register_buffer('Lt_count', torch.zeros(num_timesteps))\n \n # Gaussian part\n def gaussian_q_mean_variance(self, x_start, t):\n mean = (\n extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start\n )\n variance = extract(1.0 - self.alphas_cumprod, t, x_start.shape)\n log_variance = extract(\n self.log_1_min_cumprod_alpha, t, x_start.shape\n )\n return mean, variance, log_variance\n \n def gaussian_q_sample(self, x_start, t, noise=None):\n if noise is None:\n noise = torch.randn_like(x_start)\n assert noise.shape == x_start.shape\n return (\n extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start\n + extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)\n * noise\n )\n \n def gaussian_q_posterior_mean_variance(self, x_start, x_t, t):\n assert x_start.shape == x_t.shape\n posterior_mean = (\n 
extract(self.posterior_mean_coef1, t, x_t.shape) * x_start\n + extract(self.posterior_mean_coef2, t, x_t.shape) * x_t\n )\n posterior_variance = extract(self.posterior_variance, t, x_t.shape)\n posterior_log_variance_clipped = extract(\n self.posterior_log_variance_clipped, t, x_t.shape\n )\n assert (\n posterior_mean.shape[0]\n == posterior_variance.shape[0]\n == posterior_log_variance_clipped.shape[0]\n == x_start.shape[0]\n )\n return posterior_mean, posterior_variance, posterior_log_variance_clipped\n\n def gaussian_p_mean_variance(\n self, model_output, x, t, clip_denoised=False, denoised_fn=None, model_kwargs=None\n ):\n if model_kwargs is None:\n model_kwargs = {}\n\n B, C = x.shape[:2]\n assert t.shape == (B,)\n\n model_variance = torch.cat([self.posterior_variance[1].unsqueeze(0).to(x.device), (1. - self.alphas)[1:]], dim=0)\n # model_variance = self.posterior_variance.to(x.device)\n model_log_variance = torch.log(model_variance)\n\n model_variance = extract(model_variance, t, x.shape)\n model_log_variance = extract(model_log_variance, t, x.shape)\n\n\n if self.gaussian_parametrization == 'eps':\n pred_xstart = self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)\n elif self.gaussian_parametrization == 'x0':\n pred_xstart = model_output\n else:\n raise NotImplementedError\n \n model_mean, _, _ = self.gaussian_q_posterior_mean_variance(\n x_start=pred_xstart, x_t=x, t=t\n )\n\n assert (\n model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape\n ), f'{model_mean.shape}, {model_log_variance.shape}, {pred_xstart.shape}, {x.shape}'\n\n return {\n \"mean\": model_mean,\n \"variance\": model_variance,\n \"log_variance\": model_log_variance,\n \"pred_xstart\": pred_xstart,\n }\n \n def _vb_terms_bpd(\n self, model_output, x_start, x_t, t, clip_denoised=False, model_kwargs=None\n ):\n true_mean, _, true_log_variance_clipped = self.gaussian_q_posterior_mean_variance(\n x_start=x_start, x_t=x_t, t=t\n )\n out = self.gaussian_p_mean_variance(\n model_output, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs\n )\n kl = normal_kl(\n true_mean, true_log_variance_clipped, out[\"mean\"], out[\"log_variance\"]\n )\n kl = mean_flat(kl) / np.log(2.0)\n\n decoder_nll = -discretized_gaussian_log_likelihood(\n x_start, means=out[\"mean\"], log_scales=0.5 * out[\"log_variance\"]\n )\n assert decoder_nll.shape == x_start.shape\n decoder_nll = mean_flat(decoder_nll) / np.log(2.0)\n\n # At the first timestep return the decoder NLL,\n # otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t))\n output = torch.where((t == 0), decoder_nll, kl)\n return {\"output\": output, \"pred_xstart\": out[\"pred_xstart\"], \"out_mean\": out[\"mean\"], \"true_mean\": true_mean}\n \n def _prior_gaussian(self, x_start):\n \"\"\"\n Get the prior KL term for the variational lower-bound, measured in\n bits-per-dim.\n\n This term can't be optimized, as it only depends on the encoder.\n\n :param x_start: the [N x C x ...] 
tensor of inputs.\n :return: a batch of [N] KL values (in bits), one per batch element.\n \"\"\"\n batch_size = x_start.shape[0]\n t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)\n qt_mean, _, qt_log_variance = self.gaussian_q_mean_variance(x_start, t)\n kl_prior = normal_kl(\n mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0\n )\n return mean_flat(kl_prior) / np.log(2.0)\n \n def _gaussian_loss(self, model_out, x_start, x_t, t, noise, model_kwargs=None):\n if model_kwargs is None:\n model_kwargs = {}\n\n terms = {}\n if self.gaussian_loss_type == 'mse':\n terms[\"loss\"] = mean_flat((noise - model_out) ** 2)\n elif self.gaussian_loss_type == 'kl':\n terms[\"loss\"] = self._vb_terms_bpd(\n model_output=model_out,\n x_start=x_start,\n x_t=x_t,\n t=t,\n clip_denoised=False,\n model_kwargs=model_kwargs,\n )[\"output\"]\n\n\n return terms['loss']\n \n def _predict_xstart_from_eps(self, x_t, t, eps):\n assert x_t.shape == eps.shape\n return (\n extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t\n - extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps\n )\n \n def _predict_eps_from_xstart(self, x_t, t, pred_xstart):\n return (\n extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t\n - pred_xstart\n ) / extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)\n\n def gaussian_p_sample(\n self,\n model_out,\n x,\n t,\n clip_denoised=False,\n denoised_fn=None,\n model_kwargs=None,\n ):\n out = self.gaussian_p_mean_variance(\n model_out,\n x,\n t,\n clip_denoised=clip_denoised,\n denoised_fn=denoised_fn,\n model_kwargs=model_kwargs,\n )\n noise = torch.randn_like(x)\n nonzero_mask = (\n (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))\n ) # no noise when t == 0\n\n sample = out[\"mean\"] + nonzero_mask * torch.exp(0.5 * out[\"log_variance\"]) * noise\n return {\"sample\": sample, \"pred_xstart\": out[\"pred_xstart\"]}\n\n # Multinomial part\n\n def multinomial_kl(self, log_prob1, log_prob2):\n\n kl = (log_prob1.exp() * (log_prob1 - log_prob2)).sum(dim=1)\n\n return kl\n\n def q_pred_one_timestep(self, log_x_t, t):\n log_alpha_t = extract(self.log_alpha, t, log_x_t.shape)\n log_1_min_alpha_t = extract(self.log_1_min_alpha, t, log_x_t.shape)\n\n # alpha_t * E[xt] + (1 - alpha_t) 1 / K\n log_probs = log_add_exp(\n log_x_t + log_alpha_t,\n log_1_min_alpha_t - torch.log(self.num_classes_expanded)\n )\n\n return log_probs\n\n def q_pred(self, log_x_start, t):\n log_cumprod_alpha_t = extract(self.log_cumprod_alpha, t, log_x_start.shape)\n log_1_min_cumprod_alpha = extract(self.log_1_min_cumprod_alpha, t, log_x_start.shape)\n\n log_probs = log_add_exp(\n log_x_start + log_cumprod_alpha_t,\n log_1_min_cumprod_alpha - torch.log(self.num_classes_expanded)\n )\n\n return log_probs\n\n def predict_start(self, model_out, log_x_t, t):\n\n\n assert model_out.size(0) == log_x_t.size(0)\n assert model_out.size(1) == self.num_classes.sum(), f'{model_out.size()}'\n\n log_pred = torch.empty_like(model_out)\n for ix in self.slices_for_classes:\n log_pred[:, ix] = F.log_softmax(model_out[:, ix], dim=1)\n return log_pred\n\n def q_posterior(self, log_x_start, log_x_t, t):\n # q(xt-1 | xt, x0) = q(xt | xt-1, x0) * q(xt-1 | x0) / q(xt | x0)\n # where q(xt | xt-1, x0) = q(xt | xt-1).\n\n # EV_log_qxt_x0 = self.q_pred(log_x_start, t)\n\n # print('sum exp', EV_log_qxt_x0.exp().sum(1).mean())\n # assert False\n\n # log_qxt_x0 = (log_x_t.exp() * EV_log_qxt_x0).sum(dim=1)\n t_minus_1 = t - 1\n # Remove negative values, will not be used anyway for final 
decoder\n t_minus_1 = torch.where(t_minus_1 < 0, torch.zeros_like(t_minus_1), t_minus_1)\n log_EV_qxtmin_x0 = self.q_pred(log_x_start, t_minus_1)\n\n num_axes = (1,) * (len(log_x_start.size()) - 1)\n t_broadcast = t.to(log_x_start.device).view(-1, *num_axes) * torch.ones_like(log_x_start)\n log_EV_qxtmin_x0 = torch.where(t_broadcast == 0, log_x_start, log_EV_qxtmin_x0.to(torch.float32))\n\n # unnormed_logprobs = log_EV_qxtmin_x0 +\n # log q_pred_one_timestep(x_t, t)\n # Note: _NOT_ x_tmin1, which is how the formula is typically used!!!\n # Not very easy to see why this is true. But it is :)\n unnormed_logprobs = log_EV_qxtmin_x0 + self.q_pred_one_timestep(log_x_t, t)\n\n sliced = sliced_logsumexp(unnormed_logprobs, self.offsets)\n log_EV_xtmin_given_xt_given_xstart = unnormed_logprobs - sliced\n\n return log_EV_xtmin_given_xt_given_xstart\n\n def p_pred(self, model_out, log_x, t):\n if self.parametrization == 'x0':\n log_x_recon = self.predict_start(model_out, log_x, t=t)\n log_model_pred = self.q_posterior(\n log_x_start=log_x_recon, log_x_t=log_x, t=t)\n elif self.parametrization == 'direct':\n log_model_pred = self.predict_start(model_out, log_x, t=t)\n else:\n raise ValueError\n\n\n return log_model_pred\n\n @torch.no_grad()\n def p_sample(self, model_out, log_x, t):\n model_log_prob = self.p_pred(model_out, log_x=log_x, t=t)\n out = self.log_sample_categorical(model_log_prob)\n return out\n\n @torch.no_grad()\n def p_sample_loop(self, shape):\n device = self.log_alpha.device\n\n b = shape[0]\n # start with random normal image.\n img = torch.randn(shape, device=device)\n\n for i in reversed(range(1, self.num_timesteps)):\n img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long))\n return img\n\n @torch.no_grad()\n def _sample(self, image_size, batch_size = 16):\n return self.p_sample_loop((batch_size, 3, image_size, image_size))\n\n @torch.no_grad()\n def interpolate(self, x1, x2, t = None, lam = 0.5):\n b, *_, device = *x1.shape, x1.device\n t = default(t, self.num_timesteps - 1)\n\n assert x1.shape == x2.shape\n\n t_batched = torch.stack([torch.tensor(t, device=device)] * b)\n xt1, xt2 = map(lambda x: self.q_sample(x, t=t_batched), (x1, x2))\n\n img = (1 - lam) * xt1 + lam * xt2\n for i in reversed(range(0, t)):\n img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long))\n\n return img\n\n def log_sample_categorical(self, logits):\n full_sample = []\n for i in range(len(self.num_classes)):\n one_class_logits = logits[:, self.slices_for_classes[i]]\n uniform = torch.rand_like(one_class_logits)\n gumbel_noise = -torch.log(-torch.log(uniform + 1e-30) + 1e-30)\n sample = (gumbel_noise + one_class_logits).argmax(dim=1)\n full_sample.append(sample.unsqueeze(1))\n full_sample = torch.cat(full_sample, dim=1)\n log_sample = index_to_log_onehot(full_sample, self.num_classes)\n return log_sample\n\n def q_sample(self, log_x_start, t):\n log_EV_qxt_x0 = self.q_pred(log_x_start, t)\n\n log_sample = self.log_sample_categorical(log_EV_qxt_x0)\n\n return log_sample\n\n def nll(self, log_x_start):\n b = log_x_start.size(0)\n device = log_x_start.device\n loss = 0\n for t in range(0, self.num_timesteps):\n t_array = (torch.ones(b, device=device) * t).long()\n\n kl = self.compute_Lt(\n log_x_start=log_x_start,\n log_x_t=self.q_sample(log_x_start=log_x_start, t=t_array),\n t=t_array)\n\n loss += kl\n\n loss += self.kl_prior(log_x_start)\n\n return loss\n\n def kl_prior(self, log_x_start):\n b = log_x_start.size(0)\n device = log_x_start.device\n ones = 
torch.ones(b, device=device).long()\n\n log_qxT_prob = self.q_pred(log_x_start, t=(self.num_timesteps - 1) * ones)\n log_half_prob = -torch.log(self.num_classes_expanded * torch.ones_like(log_qxT_prob))\n\n kl_prior = self.multinomial_kl(log_qxT_prob, log_half_prob)\n\n return sum_except_batch(kl_prior)\n\n def compute_Lt(self, model_out, log_x_start, log_x_t, t, detach_mean=False):\n log_true_prob = self.q_posterior(\n log_x_start=log_x_start, log_x_t=log_x_t, t=t)\n log_model_prob = self.p_pred(model_out, log_x=log_x_t, t=t)\n\n if detach_mean:\n log_model_prob = log_model_prob.detach()\n\n kl = self.multinomial_kl(log_true_prob, log_model_prob)\n\n # if torch.isinf(kl).nonzero().shape[0] != 0:\n # idx = torch.isinf(kl).nonzero()[0]\n # print('KL 0 :', kl[idx])\n\n kl = sum_except_batch(kl)\n\n decoder_nll = -log_categorical(log_x_start, log_model_prob)\n decoder_nll = sum_except_batch(decoder_nll)\n\n mask = (t == torch.zeros_like(t)).float()\n loss = mask * decoder_nll + (1. - mask) * kl \n\n return loss\n\n def sample_time(self, b, device, method='uniform'):\n if method == 'importance':\n if not (self.Lt_count > 10).all():\n return self.sample_time(b, device, method='uniform')\n\n Lt_sqrt = torch.sqrt(self.Lt_history + 1e-10) + 0.0001\n Lt_sqrt[0] = Lt_sqrt[1] # Overwrite decoder term with L1.\n pt_all = (Lt_sqrt / Lt_sqrt.sum()).to(device)\n\n t = torch.multinomial(pt_all, num_samples=b, replacement=True).to(device)\n\n pt = pt_all.gather(dim=0, index=t)\n\n return t, pt\n\n elif method == 'uniform':\n t = torch.randint(0, self.num_timesteps, (b,), device=device).long()\n\n pt = torch.ones_like(t).float() / self.num_timesteps\n return t, pt\n else:\n raise ValueError\n\n def _multinomial_loss(self, model_out, log_x_start, log_x_t, t, pt):\n\n if self.multinomial_loss_type == 'vb_stochastic':\n\n kl = self.compute_Lt(\n model_out, log_x_start, log_x_t, t\n )\n kl_prior = self.kl_prior(log_x_start)\n # Upweigh loss term of the kl\n\n vb_loss = kl / pt + kl_prior\n\n\n return vb_loss\n\n elif self.multinomial_loss_type == 'vb_all':\n # Expensive, dont do it ;).\n # DEPRECATED\n return -self.nll(log_x_start)\n else:\n raise ValueError()\n\n def log_prob(self, x):\n b, device = x.size(0), x.device\n if self.training:\n return self._multinomial_loss(x)\n\n else:\n log_x_start = index_to_log_onehot(x, self.num_classes)\n\n t, pt = self.sample_time(b, device, 'importance')\n\n kl = self.compute_Lt(\n log_x_start, self.q_sample(log_x_start=log_x_start, t=t), t)\n\n kl_prior = self.kl_prior(log_x_start)\n\n # Upweigh loss term of the kl\n loss = kl / (pt + 1e-6) + kl_prior\n\n return -loss\n \n @torch.no_grad()\n def loss_at_step_t(self, x, step):\n\n b = x.shape[0]\n device = x.device\n\n t = (torch.ones((b,)) * step).long().to(device)\n pt = torch.ones_like(t).float() / self.num_timesteps\n\n x_num = x[:, :self.num_numerical_features]\n x_cat = x[:, self.num_numerical_features:]\n \n x_num_t = x_num\n log_x_cat_t = x_cat\n if x_num.shape[1] > 0:\n noise = torch.randn_like(x_num)\n x_num_t = self.gaussian_q_sample(x_num, t, noise=noise)\n if x_cat.shape[1] > 0:\n log_x_cat = index_to_log_onehot(x_cat.long(), self.num_classes)\n log_x_cat_t = self.q_sample(log_x_start=log_x_cat, t=t)\n \n x_in = torch.cat([x_num_t, log_x_cat_t], dim=1)\n\n model_out = self._denoise_fn(\n x_in,\n t\n )\n\n model_out_num = model_out[:, :self.num_numerical_features]\n model_out_cat = model_out[:, self.num_numerical_features:]\n\n loss_multi = torch.zeros((1,)).float()\n loss_gauss = torch.zeros((1,)).float()\n if 
x_cat.shape[1] > 0:\n loss_multi = self._multinomial_loss(model_out_cat, log_x_cat, log_x_cat_t, t, pt) / len(self.num_classes)\n \n if x_num.shape[1] > 0:\n loss_gauss = self._gaussian_loss(model_out_num, x_num, x_num_t, t, noise)\n\n recon_x0_num = self.recon_x0(x_in, model_out, t)[:,:self.num_numerical_features]\n\n recon_loss = self._gaussian_loss(recon_x0_num, x_num, x_num_t, t, x_num)\n\n return loss_multi.mean(), loss_gauss.mean(), recon_loss.mean()\n \n @torch.no_grad()\n def recon_x0(self, x, model_out, t):\n # x_num = x[:, :self.num_numerical_features]\n\n x0 = extract(self.sqrt_recip_alphas_cumprod, t, x.shape) * (x - model_out * extract(self.sqrt_one_minus_alphas_cumprod, t, x.shape))\n \n return x0\n\n def mixed_loss(self, x):\n b = x.shape[0]\n device = x.device\n t, pt = self.sample_time(b, device, 'uniform')\n\n x_num = x[:, :self.num_numerical_features]\n x_cat = x[:, self.num_numerical_features:]\n \n x_num_t = x_num\n log_x_cat_t = x_cat\n if x_num.shape[1] > 0:\n noise = torch.randn_like(x_num)\n x_num_t = self.gaussian_q_sample(x_num, t, noise=noise)\n if x_cat.shape[1] > 0:\n log_x_cat = index_to_log_onehot(x_cat.long(), self.num_classes)\n log_x_cat_t = self.q_sample(log_x_start=log_x_cat, t=t)\n \n x_in = torch.cat([x_num_t, log_x_cat_t], dim=1)\n\n model_out = self._denoise_fn(\n x_in,\n t\n )\n\n model_out_num = model_out[:, :self.num_numerical_features]\n model_out_cat = model_out[:, self.num_numerical_features:]\n\n loss_multi = torch.zeros((1,)).float()\n loss_gauss = torch.zeros((1,)).float()\n\n if x_cat.shape[1] > 0:\n loss_multi = self._multinomial_loss(model_out_cat, log_x_cat, log_x_cat_t, t, pt) / len(self.num_classes)\n \n if x_num.shape[1] > 0:\n loss_gauss = self._gaussian_loss(model_out_num, x_num, x_num_t, t, noise)\n\n\n return loss_multi.mean(), loss_gauss.mean()\n \n @torch.no_grad()\n def mixed_elbo(self, x0):\n b = x0.size(0)\n device = x0.device\n\n x_num = x0[:, :self.num_numerical_features]\n x_cat = x0[:, self.num_numerical_features:]\n has_cat = x_cat.shape[1] > 0\n if has_cat:\n log_x_cat = index_to_log_onehot(x_cat.long(), self.num_classes).to(device)\n\n gaussian_loss = []\n xstart_mse = []\n mse = []\n mu_mse = []\n out_mean = []\n true_mean = []\n multinomial_loss = []\n for t in range(self.num_timesteps):\n t_array = (torch.ones(b, device=device) * t).long()\n noise = torch.randn_like(x_num)\n\n x_num_t = self.gaussian_q_sample(x_start=x_num, t=t_array, noise=noise)\n if has_cat:\n log_x_cat_t = self.q_sample(log_x_start=log_x_cat, t=t_array)\n else:\n log_x_cat_t = x_cat\n\n model_out = self._denoise_fn(\n torch.cat([x_num_t, log_x_cat_t], dim=1),\n t_array\n )\n \n model_out_num = model_out[:, :self.num_numerical_features]\n model_out_cat = model_out[:, self.num_numerical_features:]\n\n kl = torch.tensor([0.0])\n if has_cat:\n kl = self.compute_Lt(\n model_out=model_out_cat,\n log_x_start=log_x_cat,\n log_x_t=log_x_cat_t,\n t=t_array\n )\n\n out = self._vb_terms_bpd(\n model_out_num,\n x_start=x_num,\n x_t=x_num_t,\n t=t_array,\n clip_denoised=False\n )\n\n multinomial_loss.append(kl)\n gaussian_loss.append(out[\"output\"])\n xstart_mse.append(mean_flat((out[\"pred_xstart\"] - x_num) ** 2))\n # mu_mse.append(mean_flat(out[\"mean_mse\"]))\n out_mean.append(mean_flat(out[\"out_mean\"]))\n true_mean.append(mean_flat(out[\"true_mean\"]))\n\n eps = self._predict_eps_from_xstart(x_num_t, t_array, out[\"pred_xstart\"])\n mse.append(mean_flat((eps - noise) ** 2))\n\n gaussian_loss = torch.stack(gaussian_loss, dim=1)\n multinomial_loss = 
torch.stack(multinomial_loss, dim=1)\n xstart_mse = torch.stack(xstart_mse, dim=1)\n mse = torch.stack(mse, dim=1)\n # mu_mse = torch.stack(mu_mse, dim=1)\n out_mean = torch.stack(out_mean, dim=1)\n true_mean = torch.stack(true_mean, dim=1)\n\n\n\n prior_gauss = self._prior_gaussian(x_num)\n\n prior_multin = torch.tensor([0.0])\n if has_cat:\n prior_multin = self.kl_prior(log_x_cat)\n\n total_gauss = gaussian_loss.sum(dim=1) + prior_gauss\n total_multin = multinomial_loss.sum(dim=1) + prior_multin\n return {\n \"total_gaussian\": total_gauss,\n \"total_multinomial\": total_multin,\n \"losses_gaussian\": gaussian_loss,\n \"losses_multinimial\": multinomial_loss,\n \"xstart_mse\": xstart_mse,\n \"mse\": mse,\n # \"mu_mse\": mu_mse\n \"out_mean\": out_mean,\n \"true_mean\": true_mean\n }\n\n @torch.no_grad()\n def gaussian_ddim_step(\n self,\n model_out_num,\n x,\n t,\n t_prev,\n clip_denoised=False,\n denoised_fn=None,\n eta=1.0\n ):\n out = self.gaussian_p_mean_variance(\n model_out_num,\n x,\n t,\n clip_denoised=clip_denoised,\n denoised_fn=denoised_fn,\n model_kwargs=None,\n )\n\n eps = self._predict_eps_from_xstart(x, t, out[\"pred_xstart\"])\n\n alpha_bar = extract(self.alphas_cumprod, t, x.shape)\n \n if t[0] != 0:\n alpha_bar_prev = extract(self.alphas_cumprod, t_prev, x.shape)\n else:\n alpha_bar_prev = extract(self.alphas_cumprod_prev, t_prev, x.shape)\n \n sigma = (\n eta\n * torch.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar))\n * torch.sqrt(1 - alpha_bar / alpha_bar_prev)\n )\n\n noise = torch.randn_like(x)\n mean_pred = (\n out[\"pred_xstart\"] * torch.sqrt(alpha_bar_prev)\n + torch.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps\n )\n nonzero_mask = (\n (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))\n ) # no noise when t == 0\n sample = mean_pred + nonzero_mask * sigma * noise\n\n return sample\n\n \n @torch.no_grad()\n def gaussian_ddim_sample(\n self,\n noise,\n T,\n eta=0.0\n ):\n x = noise\n b = x.shape[0]\n device = x.device\n for t in reversed(range(T)):\n print(f'Sample timestep {t:4d}', end='\\r')\n t_array = (torch.ones(b, device=device) * t).long()\n out_num = self._denoise_fn(x, t_array)\n x = self.gaussian_ddim_step(\n out_num,\n x,\n t_array\n )\n print()\n return x\n\n\n @torch.no_grad()\n def gaussian_ddim_reverse_step(\n self,\n model_out_num,\n x,\n t,\n clip_denoised=False,\n eta=0.0\n ):\n assert eta == 0.0, \"Eta must be zero.\"\n out = self.gaussian_p_mean_variance(\n model_out_num,\n x,\n t,\n clip_denoised=clip_denoised,\n denoised_fn=None,\n model_kwargs=None,\n )\n\n eps = (\n extract(self.sqrt_recip_alphas_cumprod, t, x.shape) * x\n - out[\"pred_xstart\"]\n ) / extract(self.sqrt_recipm1_alphas_cumprod, t, x.shape)\n alpha_bar_next = extract(self.alphas_cumprod_next, t, x.shape)\n\n mean_pred = (\n out[\"pred_xstart\"] * torch.sqrt(alpha_bar_next)\n + torch.sqrt(1 - alpha_bar_next) * eps\n )\n\n return mean_pred\n\n @torch.no_grad()\n def gaussian_ddim_reverse_sample(\n self,\n x,\n T\n ):\n b = x.shape[0]\n device = x.device\n for t in range(T):\n print(f'Reverse timestep {t:4d}', end='\\r')\n t_array = (torch.ones(b, device=device) * t).long()\n out_num = self._denoise_fn(x, t_array)\n x = self.gaussian_ddim_reverse_step(\n out_num,\n x,\n t_array,\n eta=0.0\n )\n print()\n\n return x\n\n\n @torch.no_grad()\n def multinomial_ddim_step(\n self,\n model_out_cat,\n log_x_t,\n t,\n t_prev,\n eta=1.0\n ):\n # not ddim, essentially\n log_x0 = self.predict_start(model_out_cat, log_x_t=log_x_t, t=t)\n\n alpha_bar = extract(self.alphas_cumprod, t, 
log_x_t.shape)\n\n if t[0] != 0:\n alpha_bar_prev = extract(self.alphas_cumprod, t_prev, log_x_t.shape)\n else:\n alpha_bar_prev = extract(self.alphas_cumprod_prev, t_prev, log_x_t.shape)\n \n sigma = (\n eta\n * torch.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar))\n * torch.sqrt(1 - alpha_bar / alpha_bar_prev)\n )\n\n coef1 = sigma\n coef2 = alpha_bar_prev - sigma * alpha_bar\n coef3 = 1 - coef1 - coef2\n\n\n log_ps = torch.stack([\n torch.log(coef1) + log_x_t,\n torch.log(coef2) + log_x0,\n torch.log(coef3) - torch.log(self.num_classes_expanded)\n ], dim=2) \n\n log_prob = torch.logsumexp(log_ps, dim=2)\n\n out = self.log_sample_categorical(log_prob)\n\n return out\n\n @torch.no_grad()\n def sample_ddim(self, num_samples, steps = 1000):\n b = num_samples\n device = self.log_alpha.device\n z_norm = torch.randn((b, self.num_numerical_features), device=device)\n\n has_cat = self.num_classes[0] != 0\n log_z = torch.zeros((b, 0), device=device).float()\n if has_cat:\n uniform_logits = torch.zeros((b, len(self.num_classes_expanded)), device=device)\n log_z = self.log_sample_categorical(uniform_logits)\n \n interval = 1000 // steps\n timesteps = list(np.arange(999, -1, -interval))\n\n if timesteps[-1] != 0:\n timesteps.append(0)\n \n for i in range(0, len(timesteps)):\n\n print(f'Sample timestep {i:4d}', end='\\r')\n \n t = torch.full((b,), timesteps[i], device=device, dtype=torch.long)\n \n \n if i != len(timesteps) -1 :\n t_prev = torch.full((b,), timesteps[i+1], device=device, dtype=torch.long)\n else:\n t_prev = torch.full((b,), 0, device=device, dtype=torch.long)\n \n model_out = self._denoise_fn(\n torch.cat([z_norm, log_z], dim=1).float(),\n t\n )\n model_out_num = model_out[:, :self.num_numerical_features]\n model_out_cat = model_out[:, self.num_numerical_features:]\n z_norm = self.gaussian_ddim_step(model_out_num, z_norm, t, t_prev, clip_denoised=False)\n if has_cat:\n log_z = self.multinomial_ddim_step(model_out_cat, log_z, t, t_prev)\n\n print()\n z_ohe = torch.exp(log_z).round()\n z_cat = log_z\n if has_cat:\n z_cat = ohe_to_categories(z_ohe, self.num_classes)\n sample = torch.cat([z_norm, z_cat], dim=1).cpu()\n return sample\n\n\n @torch.no_grad()\n def sample(self, num_samples):\n b = num_samples\n device = self.log_alpha.device\n z_norm = torch.randn((b, self.num_numerical_features), device=device)\n\n has_cat = self.num_classes[0] != 0\n log_z = torch.zeros((b, 0), device=device).float()\n if has_cat:\n uniform_logits = torch.zeros((b, len(self.num_classes_expanded)), device=device)\n print(uniform_logits.shape)\n log_z = self.log_sample_categorical(uniform_logits)\n\n for i in reversed(range(0, self.num_timesteps)):\n print(f'Sample timestep {i:4d}', end='\\r')\n t = torch.full((b,), i, device=device, dtype=torch.long)\n model_out = self._denoise_fn(\n torch.cat([z_norm, log_z], dim=1).float(),\n t\n )\n model_out_num = model_out[:, :self.num_numerical_features]\n model_out_cat = model_out[:, self.num_numerical_features:]\n z_norm = self.gaussian_p_sample(model_out_num, z_norm, t, clip_denoised=False)['sample']\n if has_cat:\n log_z = self.p_sample(model_out_cat, log_z, t)\n\n print()\n z_ohe = torch.exp(log_z).round()\n z_cat = log_z\n if has_cat:\n z_cat = ohe_to_categories(z_ohe, self.num_classes)\n sample = torch.cat([z_norm, z_cat], dim=1).cpu()\n return sample\n \n def sample_all(self, num_samples, batch_size, ddim=False, steps = 1000):\n if ddim:\n print('Sample using DDIM.')\n sample_fn = self.sample_ddim\n else:\n sample_fn = self.sample\n \n b = batch_size\n\n 
all_samples = []\n num_generated = 0\n while num_generated < num_samples:\n if not ddim:\n sample = sample_fn(b)\n else:\n sample = sample_fn(b, steps=steps)\n mask_nan = torch.any(sample.isnan(), dim=1)\n sample = sample[~mask_nan]\n\n all_samples.append(sample)\n \n if sample.shape[0] != b:\n raise FoundNANsError\n num_generated += sample.shape[0]\n\n x_gen = torch.cat(all_samples, dim=0)[:num_samples]\n\n return x_gen" } ]
import os import sys import time import torch import numpy as np import pandas as pd import src from copy import deepcopy from utils_train import make_dataset, update_ema from baselines.tabddpm.models.modules import MLPDiffusion from baselines.tabddpm.models.gaussian_multinomial_distribution import GaussianMultinomialDiffusion
12,837
self.optimizer.zero_grad() loss_multi, loss_gauss = self.diffusion.mixed_loss(x) loss = loss_multi + loss_gauss loss.backward() self.optimizer.step() return loss_multi, loss_gauss def run_loop(self): step = 0 curr_loss_multi = 0.0 curr_loss_gauss = 0.0 curr_count = 0 self.print_every = 1 self.log_every = 1 best_loss = np.inf print('Steps: ', self.steps) while step < self.steps: start_time = time.time() x = next(self.train_iter)[0] batch_loss_multi, batch_loss_gauss = self._run_step(x) self._anneal_lr(step) curr_count += len(x) curr_loss_multi += batch_loss_multi.item() * len(x) curr_loss_gauss += batch_loss_gauss.item() * len(x) if (step + 1) % self.log_every == 0: mloss = np.around(curr_loss_multi / curr_count, 4) gloss = np.around(curr_loss_gauss / curr_count, 4) if np.isnan(gloss): print('Finding Nan') break if (step + 1) % self.print_every == 0: print(f'Step {(step + 1)}/{self.steps} MLoss: {mloss} GLoss: {gloss} Sum: {mloss + gloss}') self.loss_history.loc[len(self.loss_history)] =[step + 1, mloss, gloss, mloss + gloss] np.set_printoptions(suppress=True) curr_count = 0 curr_loss_gauss = 0.0 curr_loss_multi = 0.0 if mloss + gloss < best_loss: best_loss = mloss + gloss torch.save(self.diffusion._denoise_fn.state_dict(), os.path.join(self.model_save_path, 'model.pt')) if (step + 1) % 10000 == 0: torch.save(self.diffusion._denoise_fn.state_dict(), os.path.join(self.model_save_path, f'model_{step+1}.pt')) # update_ema(self.ema_model.parameters(), self.diffusion._denoise_fn.parameters()) step += 1 # end_time = time.time() # print('Time: ', end_time - start_time) def train( model_save_path, real_data_path, steps = 1000, lr = 0.002, weight_decay = 1e-4, batch_size = 1024, task_type = 'binclass', model_type = 'mlp', model_params = None, num_timesteps = 1000, gaussian_loss_type = 'mse', scheduler = 'cosine', T_dict = None, num_numerical_features = 0, device = torch.device('cuda:0'), seed = 0, change_val = False ): real_data_path = os.path.normpath(real_data_path) # zero.improve_reproducibility(seed) T = src.Transformations(**T_dict) dataset = make_dataset( real_data_path, T, task_type = task_type, change_val = False, ) K = np.array(dataset.get_category_sizes('train')) if len(K) == 0 or T_dict['cat_encoding'] == 'one-hot': K = np.array([0]) num_numerical_features = dataset.X_num['train'].shape[1] if dataset.X_num is not None else 0 d_in = np.sum(K) + num_numerical_features model_params['d_in'] = d_in print(d_in) print(model_params) model = get_model( model_type, model_params, num_numerical_features, category_sizes=dataset.get_category_sizes('train') ) model.to(device) print(model) train_loader = src.prepare_fast_dataloader(dataset, split='train', batch_size=batch_size)
def get_model( model_name, model_params, n_num_features, category_sizes ): print(model_name) if model_name == 'mlp': model = MLPDiffusion(**model_params) else: raise "Unknown model!" return model class Trainer: def __init__(self, diffusion, train_iter, lr, weight_decay, steps, model_save_path, device=torch.device('cuda:1')): self.diffusion = diffusion self.ema_model = deepcopy(self.diffusion._denoise_fn) for param in self.ema_model.parameters(): param.detach_() self.train_iter = train_iter self.steps = steps self.init_lr = lr self.optimizer = torch.optim.AdamW(self.diffusion.parameters(), lr=lr, weight_decay=weight_decay) self.device = device self.loss_history = pd.DataFrame(columns=['step', 'mloss', 'gloss', 'loss']) self.model_save_path = model_save_path columns = list(np.arange(5)*200) columns[0] = 1 columns = ['step'] + columns self.log_every = 50 self.print_every = 1 self.ema_every = 1000 def _anneal_lr(self, step): frac_done = step / self.steps lr = self.init_lr * (1 - frac_done) for param_group in self.optimizer.param_groups: param_group["lr"] = lr def _run_step(self, x): x = x.to(self.device) self.optimizer.zero_grad() loss_multi, loss_gauss = self.diffusion.mixed_loss(x) loss = loss_multi + loss_gauss loss.backward() self.optimizer.step() return loss_multi, loss_gauss def run_loop(self): step = 0 curr_loss_multi = 0.0 curr_loss_gauss = 0.0 curr_count = 0 self.print_every = 1 self.log_every = 1 best_loss = np.inf print('Steps: ', self.steps) while step < self.steps: start_time = time.time() x = next(self.train_iter)[0] batch_loss_multi, batch_loss_gauss = self._run_step(x) self._anneal_lr(step) curr_count += len(x) curr_loss_multi += batch_loss_multi.item() * len(x) curr_loss_gauss += batch_loss_gauss.item() * len(x) if (step + 1) % self.log_every == 0: mloss = np.around(curr_loss_multi / curr_count, 4) gloss = np.around(curr_loss_gauss / curr_count, 4) if np.isnan(gloss): print('Finding Nan') break if (step + 1) % self.print_every == 0: print(f'Step {(step + 1)}/{self.steps} MLoss: {mloss} GLoss: {gloss} Sum: {mloss + gloss}') self.loss_history.loc[len(self.loss_history)] =[step + 1, mloss, gloss, mloss + gloss] np.set_printoptions(suppress=True) curr_count = 0 curr_loss_gauss = 0.0 curr_loss_multi = 0.0 if mloss + gloss < best_loss: best_loss = mloss + gloss torch.save(self.diffusion._denoise_fn.state_dict(), os.path.join(self.model_save_path, 'model.pt')) if (step + 1) % 10000 == 0: torch.save(self.diffusion._denoise_fn.state_dict(), os.path.join(self.model_save_path, f'model_{step+1}.pt')) # update_ema(self.ema_model.parameters(), self.diffusion._denoise_fn.parameters()) step += 1 # end_time = time.time() # print('Time: ', end_time - start_time) def train( model_save_path, real_data_path, steps = 1000, lr = 0.002, weight_decay = 1e-4, batch_size = 1024, task_type = 'binclass', model_type = 'mlp', model_params = None, num_timesteps = 1000, gaussian_loss_type = 'mse', scheduler = 'cosine', T_dict = None, num_numerical_features = 0, device = torch.device('cuda:0'), seed = 0, change_val = False ): real_data_path = os.path.normpath(real_data_path) # zero.improve_reproducibility(seed) T = src.Transformations(**T_dict) dataset = make_dataset( real_data_path, T, task_type = task_type, change_val = False, ) K = np.array(dataset.get_category_sizes('train')) if len(K) == 0 or T_dict['cat_encoding'] == 'one-hot': K = np.array([0]) num_numerical_features = dataset.X_num['train'].shape[1] if dataset.X_num is not None else 0 d_in = np.sum(K) + num_numerical_features model_params['d_in'] = 
d_in print(d_in) print(model_params) model = get_model( model_type, model_params, num_numerical_features, category_sizes=dataset.get_category_sizes('train') ) model.to(device) print(model) train_loader = src.prepare_fast_dataloader(dataset, split='train', batch_size=batch_size)
diffusion = GaussianMultinomialDiffusion(
3
2023-10-10 18:06:31+00:00
16k
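The tabddpm record above pairs a linear learning-rate anneal (Trainer._anneal_lr) with an exponential-moving-average copy of the denoiser weights (update_ema). A minimal sketch of those two update rules follows; the linear layer and training loop are toy placeholders standing in for MLPDiffusion and the real data loader.

import torch


def update_ema(target_params, source_params, rate=0.999):
    # target <- rate * target + (1 - rate) * source, applied in place.
    for target, source in zip(target_params, source_params):
        target.detach().mul_(rate).add_(source.detach(), alpha=1 - rate)


def anneal_lr(optimizer, init_lr, step, total_steps):
    # Linear decay from init_lr at step 0 down to 0 at total_steps.
    lr = init_lr * (1 - step / total_steps)
    for param_group in optimizer.param_groups:
        param_group["lr"] = lr


if __name__ == "__main__":
    model = torch.nn.Linear(4, 4)
    ema_model = torch.nn.Linear(4, 4)
    ema_model.load_state_dict(model.state_dict())
    opt = torch.optim.AdamW(model.parameters(), lr=2e-3, weight_decay=1e-4)
    total_steps = 1000
    for step in range(3):  # a few illustrative steps on random data
        loss = model(torch.randn(8, 4)).pow(2).mean()
        opt.zero_grad()
        loss.backward()
        opt.step()
        anneal_lr(opt, init_lr=2e-3, step=step, total_steps=total_steps)
        update_ema(ema_model.parameters(), model.parameters(), rate=0.999)
        print(step, opt.param_groups[0]["lr"])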
ThomasMrY/DisDiff
ldm/models/diffusion/ddpm_kl.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates\n else torch.tensor(-1,dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n #remove as '.'-character is not allowed in buffers\n s_name = name.replace('.','')\n self.m_name2s_name.update({name:s_name})\n self.register_buffer(s_name,p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self,model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for 
key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n \n def kl_splits(self, latent_unit=6):\n mean_splits = self.mean.chunk(latent_unit, dim=-1)\n var_splits = self.var.chunk(latent_unit, dim=-1)\n logvar_splits = self.logvar.chunk(latent_unit, dim=-1)\n kl_loss = 0\n for mean, var, logvar in zip(mean_splits, var_splits, logvar_splits):\n kl_split = 0.5 * torch.sum(torch.pow(mean, 2)\n + var - 1.0 - logvar,\n dim=-1)\n kl_loss += torch.sum(kl_split) / kl_split.shape[0]\n return kl_loss/latent_unit\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "VQModelInterface", "path": "ldm/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n # h = self.encoder(x)\n # h = self.quant_conv(h)\n # quant, emb_loss, info = self.quantize(h)\n # return quant, emb_loss, info\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": 
"ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n log_dict_ae[\"train/epoch_num\"] = self.current_epoch\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n self.log(\"val/rec_loss\", log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def 
configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(list(self.encoder.parameters())+\n list(self.decoder.parameters())+\n list(self.quant_conv.parameters())+\n list(self.post_quant_conv.parameters()),\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n 
num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev, ddim_coef = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_coef', ddim_coef)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(cond = conditioning, shape=size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n **kwargs\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,**kwargs):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n outs = self.p_sample_ddim(x = img, c=cond, t=ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning, **kwargs)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,**kwargs):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c, **kwargs)\n e_t = return_wrap(e_t, torch.full((b, 1, 1, 1), self.ddim_coef[index], device=device))\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n # p.savez(\"data.npz\", z=z, x = x, xrec = xrec, x_T = x_T, time = time, alphas = alphas, alphas_prev = alphas_prev, sqrt_one_minus_alphas = sqrt_one_minus_alphas, sigmas = sigmas.cpu().numpy(),e_t = e_t)\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0" }, { "identifier": "return_wrap", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def return_wrap(inp, coef):\n if isinstance(inp, Return):\n return inp.pred\n elif isinstance(inp, Return_grad) or isinstance(inp, Return_grad_full):\n # return inp.out_grad\n return inp.pred + coef * inp.out_grad" } ]
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
import pytorch_lightning as pl
import copy
import os
import pandas as pd
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.modules.diffusionmodules.util import return_wrap
11,446
def apply_model(self, x_noisy, t, cond, return_ids=False, sampled_concept= None): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} if hasattr(self, "split_input_params"): assert len(cond) == 1 # todo can only deal with one conditioning atm assert not return_ids ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) h, w = x_noisy.shape[-2:] fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride) z = unfold(x_noisy) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] if self.cond_stage_key in ["image", "LR_image", "segmentation", 'bbox_img'] and self.model.conditioning_key: # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert (len(c) == 1) # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] elif self.cond_stage_key == 'coordinates_bbox': assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params['original_image_size'] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2 ** (num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) for patch_nr in range(z.shape[-1])] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [(x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device) for bbox in patch_limits] # list of length l with tensors of shape (1, 2) print(patch_limits_tknzd[0].shape) # cut tknzd crop position from conditioning assert isinstance(cond, dict), 'cond must be dict to be fed into model' cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) print(cut_cond.shape) adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]) adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') print(adapted_cond.shape) adapted_cond = self.get_learned_conditioning(adapted_cond) print(adapted_cond.shape) adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) print(adapted_cond.shape) cond_list = 
[{'c_crossattn': [e]} for e in adapted_cond] else: cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient # apply model by loop over crops output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])] assert not isinstance(output_list[0], tuple) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, sampled_concept = sampled_concept, **cond) # if isinstance(x_recon, tuple) and not return_ids: # return x_recon[0] # else: # return x_recon return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) self.ce_loss = nn.CrossEntropyLoss(reduction = "none") if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. 
- betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) self.register_buffer("shift_coef", - to_torch(np.sqrt(alphas)) * (1. - self.alphas_cumprod_prev) / torch.sqrt(1. - self.alphas_cumprod)) self.register_buffer("ddim_coef", -self.sqrt_one_minus_alphas_cumprod) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") self.load_epoch = sd['epoch'] self.load_step = sd["global_step"] if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) eps_pred = return_wrap(model_out, extract_into_tensor(self.ddim_coef, t, x.shape)) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=eps_pred) elif self.parameterization == "x0": x_recon = eps_pred if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) eps_pred = return_wrap(model_out, extract_into_tensor(self.shift_coef, t, x_start.shape)) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") loss = self.get_loss(eps_pred, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = 
self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): pass # _, loss_dict_no_ema = self.shared_step(batch) # with self.ema_scope(): # _, loss_dict_ema = self.shared_step(batch) # loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} # self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) # self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, dis_loss_flag = False, detach_flag = False, train_enc_flag = False, dis_weight = 1.0, kl_weight = 0.0005, dis_loss_type = "IM", kl_loss_flag = False, *args, **kwargs): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__': conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) 
super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key self.dis_loss_flag = dis_loss_flag self.detach_flag = detach_flag self.train_enc_flag = train_enc_flag self.dis_weight = dis_weight self.dis_loss_type = dis_loss_type self.kl_loss_flag = kl_loss_flag self.kl_weight = kl_weight try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() # def on_train_batch_start(self, batch, batch_idx, dataloader_idx): def on_train_batch_start(self, batch, batch_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if hasattr(self.model.diffusion_model,"scale_factor"): del self.scale_factor self.register_buffer('scale_factor', self.model.diffusion_model.scale_factor) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING Pre-Trained STD-RESCALING ###") else: del self.scale_factor self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, 
device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox']: xc = batch[cond_key] elif cond_key == 'class_label': xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): # import pudb; 
pudb.set_trace() c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] else: c = None xc = None out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) # same as above but without decorator def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. 
apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) df = self.split_input_params["vqf"] self.split_input_params['original_image_size'] = x.shape[-2:] bs, nc, h, w = x.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df) z = unfold(x) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) output_list = [self.first_stage_model.encode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization return decoded else: return self.first_stage_model.encode(x) else: return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset def rescale_bbox(bbox): x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) w = min(bbox[2] / crop_coordinates[2], 1 - x0) h = min(bbox[3] / crop_coordinates[3], 1 - y0) return x0, y0, w, h return [rescale_bbox(b) for b in bboxes] def apply_model(self, x_noisy, t, cond, return_ids=False, sampled_concept= None): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 
'c_crossattn' cond = {key: cond} if hasattr(self, "split_input_params"): assert len(cond) == 1 # todo can only deal with one conditioning atm assert not return_ids ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) h, w = x_noisy.shape[-2:] fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride) z = unfold(x_noisy) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] if self.cond_stage_key in ["image", "LR_image", "segmentation", 'bbox_img'] and self.model.conditioning_key: # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert (len(c) == 1) # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] elif self.cond_stage_key == 'coordinates_bbox': assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params['original_image_size'] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2 ** (num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) for patch_nr in range(z.shape[-1])] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [(x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device) for bbox in patch_limits] # list of length l with tensors of shape (1, 2) print(patch_limits_tknzd[0].shape) # cut tknzd crop position from conditioning assert isinstance(cond, dict), 'cond must be dict to be fed into model' cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) print(cut_cond.shape) adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]) adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') print(adapted_cond.shape) adapted_cond = self.get_learned_conditioning(adapted_cond) print(adapted_cond.shape) adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) print(adapted_cond.shape) cond_list = [{'c_crossattn': [e]} for e in adapted_cond] else: cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient # apply model by loop over crops output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])] assert not 
isinstance(output_list[0], tuple) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, sampled_concept = sampled_concept, **cond) # if isinstance(x_recon, tuple) and not return_ids: # return x_recon[0] # else: # return x_recon return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
return mean_flat(kl_prior) / np.log(2.0)
5
2023-10-07 09:58:07+00:00
16k
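The latent-diffusion record that ends above leans on one recurring trick in `decode_first_stage`, `encode_first_stage` and `apply_model`: `unfold` the input into overlapping crops, run the first-stage model (or the denoiser) on each crop along the last dimension, then `fold` the stacked outputs back and divide by a normalization map so that overlapping regions are averaged. The block below is a minimal sketch of that pattern using plain `torch.nn.functional.unfold`/`fold` with a uniform weighting; it is my own illustration, not code from the record, and it omits the record's `get_fold_unfold` extras (the `uf`/`vqf` rescaling factors and the smoothing weight map).

```python
import torch
import torch.nn.functional as F

def patched_apply(x, patch_fn, ks=(4, 4), stride=(2, 2)):
    """Run patch_fn on overlapping crops of x, then stitch and average them."""
    bs, nc, h, w = x.shape
    patches = F.unfold(x, kernel_size=ks, stride=stride)          # (bs, nc*ks0*ks1, L)
    L = patches.shape[-1]
    patches = patches.view(bs, nc, ks[0], ks[1], L)               # one crop per index of the last dim
    outs = [patch_fn(patches[..., i]) for i in range(L)]          # stand-in for the per-crop model call
    o = torch.stack(outs, dim=-1).view(bs, -1, L)                 # back to the unfold layout
    stitched = F.fold(o, output_size=(h, w), kernel_size=ks, stride=stride)
    counts = F.fold(torch.ones_like(o), output_size=(h, w), kernel_size=ks, stride=stride)
    return stitched / counts                                      # average the overlapping crops

x = torch.randn(1, 3, 8, 8)
print(torch.allclose(patched_apply(x, patch_fn=lambda p: p), x, atol=1e-6))  # identity per crop round-trips
```

As a side note on the same record, its `next_line` target, `return mean_flat(kl_prior) / np.log(2.0)`, is the usual nats-to-bits conversion for the prior KL term that `_prior_bpd` reports in bits-per-dim.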
wiio12/LEGO-Prover
lego_prover/prover.py
[ { "identifier": "IsabelleEnv", "path": "lego_prover/env/isa_bridge.py", "snippet": "class IsabelleEnv(gym.Env):\n def __init__(\n self,\n logger=None,\n isabelle_path=\"/Users/wiio/Isabelle2022\",\n working_dir=\"miniF2F\",\n interactive_file=\"miniF2F/interactive.thy\",\n server_host=\"http://127.0.0.1\",\n server_port=8000,\n request_timeout=600,\n log_path=\"./logs\",\n ):\n self.logger = logger\n self.isabelle_path = isabelle_path\n self.working_dir = os.path.abspath(working_dir)\n self.interactive_file = os.path.abspath(interactive_file)\n self.server = f\"{server_host}:{server_port}\"\n self.server_port = server_port\n self.request_timeout = request_timeout\n self.log_path = log_path\n self.isabelle_server = self.get_isabelle_process(server_port)\n self.isabelle_server.run()\n self.stub = None\n \n # wait for isabelle server to run\n time.sleep(3)\n\n self.has_reset = False\n self.reset_options = None\n self.connected = False\n\n def get_isabelle_process(self, server_port):\n self.logger.info(f\"Starting isabelle server at port {server_port}\")\n U.f_mkdir(self.log_path, \"isabelle_server\")\n return SubprocessMonitor(\n commands=[\n \"bash\",\n \"run_server.sh\",\n str(server_port),\n ],\n name=\"isabelle_server\",\n ready_match=r\"Server is running. Press Ctrl-C to stop.\",\n log_path=U.f_join(self.log_path, \"isabelle_server\"),\n cwd=os.path.abspath(\"lego_prover/env/Portal-to-ISAbelle\"),\n server_port=server_port,\n )\n \n def step(\n self,\n code: str,\n formal_statement: str = None,\n quick_check: bool = False,\n ) -> Tuple[ObsType, SupportsFloat, bool, bool, Dict[str, Any]]:\n # if \"theory\" in code:\n # assert \"begin\" in code and \"end\" in code, \\\n # \"Outer syntax error: not complete theorem file\"\n # code = code[code.index(\"begin\") + len(\"begin\"): code.index(\"end\")].strip()\n \n # step 0: replace special token\n for symbol, value in SPECIAL_SYMBOL.items():\n if symbol in code:\n code = code.replace(symbol, value)\n\n # step 1: parse code\n parsed_code = self._get_parsed_code(code)\n\n # step 2: step by step verification\n verified_result = self._verify_step_by_step(parsed_code, quick_check=quick_check)\n if quick_check:\n return verified_result, None, None, None\n\n # step 3: post process error message\n verified_result, code, correct_partial_code, incorrect_code = self._post_process_error_msg(code, parsed_code, verified_result)\n\n # step 4: get skill code\n skill_codes = self._post_process_skill_code(correct_partial_code)\n\n # step 5: get request\n requests = self._get_request(code, skill_codes)\n \n return verified_result, code, skill_codes, requests\n\n def render(self):\n raise NotImplementedError(\"render is not implemented\")\n\n def reset(self, imports=None, hard_reset=False):\n # TODO: we fix the imports for now, we support update imports later.\n if self.stub is None or hard_reset:\n self.stub = create_stub(self.server_port)\n try:\n self.logger.info(self.stub.InitialiseIsabelle(server_pb2.IsaPath(path=self.isabelle_path)).message)\n self.logger.info(self.stub.IsabelleWorkingDirectory(server_pb2.IsaPath(path=self.working_dir)).message)\n self.logger.info(self.stub.IsabelleContext(server_pb2.IsaContext(context=self.interactive_file)).message)\n self.successful_starting = True\n except Exception as e:\n self.logger.info(\"Failure at initializing Isabelle process.\\n\"\n \"Make sure the path your provide is where the Isabelle executable is.\")\n self.logger.info(e)\n # This will reset all state\n self._post(f\"<initialise>\")\n return f\"Starting is 
successful: {self.successful_starting}\"\n else:\n self._post(\"reset_problem\")\n return f\"soft reset problem successful\"\n \n def close(self):\n if self.stub is not None:\n self._exit()\n self.isabelle_server.stop()\n return not self.connected\n \n # @func_set_timeout(1800, allowOverride=True)\n def _post(self, action):\n reset_retry_cnt = 3\n while reset_retry_cnt > 0:\n try:\n result = self.stub.IsabelleCommand(server_pb2.IsaCommand(command=action)).state\n return result\n except Exception as e:\n self.logger.info(f\"Isabelle environment exception: {e}\")\n self.isabelle_server.terminate()\n self.isabelle_server = self.get_isabelle_process(self.server_port)\n self.isabelle_server.run()\n time.sleep(3)\n self.reset(hard_reset=True)\n reset_retry_cnt -= 1\n assert False, \"Isabelle enviroment fail to reboot!\"\n \n\n def _exit(self):\n try:\n self._post('exit')\n except:\n self.logger.info(\"Post('exit') timed out, kill from system...\")\n os.system(\"ps aux | grep Isabelle | awk '{print $2}' | xargs kill -9 > /dev/null 2>&1\")\n os.system(\"ps aux | grep poly | awk '{print $2}' | xargs kill -9 > /dev/null 2>&1\")\n\n\n def _get_parsed_code(self, theory, tls_name='default') -> List[str]:\n steps = self._post(f\"<parse text> ${theory}\")\n steps = steps.split('<SEP>')\n steps = [s for s in steps if s.strip() != '']\n # remove weird '$' step and whitespace steps\n steps = [s for s in steps if s != '$' and s.strip() != '']\n return steps\n \n def _parse_hammer_output(self, obs):\n \"\"\"Parse the sledgehammer output, otherwise return an empty string\"\"\"\n if '<hammer>' in obs:\n output = obs.split('<hammer>')[1]\n else:\n output = ''\n return output\n\n def _verify_step_by_step(self, steps, quick_check=False):\n done = False\n reason = ''\n success = False\n step_results = []\n tls_name = 'default'\n error_step_index = None\n corrected_step = {}\n for i, step in enumerate(steps):\n try:\n step_time = time.time()\n if \"sledgehammer\" not in step:\n obs, reward, done, metadata, error = self._run_step(step, i, tls_name)\n strip_step = step.strip()\n\n if error is not None and quick_check is True:\n self._post(\"reset_problem\")\n return False\n \n # only fix \"by\" step\n if error is not None and strip_step.startswith(\"by\"):\n old_status = copy((obs, reward, done, metadata, error))\n # try correct the step with sledgehammer step\n one_line_error = error.replace('\\n', ' ')\n self.logger.info(f\"Error with step: [{step}], error: [{one_line_error}]\")\n self.logger.info(\"Trying hammer methods...\")\n obs, reward, done, metadata, error = self._run_sledgehammer(step, i, tls_name)\n if obs is not None:\n actual_step, obs = obs.split(\"<hammer>\")\n actual_step, obs = actual_step.strip(), obs.strip()\n corrected_step[i] = (step, actual_step)\n else:\n obs, reward, done, metadata, error = old_status\n else:\n if quick_check is True:\n self._post(\"reset_problem\")\n return False\n self.logger.info(\"Model use sledgehammer, Trying hammer methods...\")\n obs, reward, done, metadata, error = self._run_sledgehammer(step, i, tls_name)\n if obs is not None:\n actual_step, obs = obs.split(\"<hammer>\")\n actual_step, obs = actual_step.strip(), obs.strip()\n corrected_step[i] = (step, actual_step)\n\n step_time = time.time() - step_time\n step_results.append({\n \"index\": i,\n \"step\": step,\n \"output\": obs,\n \"step_time\": step_time,\n })\n if error is not None:\n reason = error\n success = False\n done = False\n error_step_index = i\n break\n except Exception as e:\n # Timeout - end the proof 
attempt\n success = False\n done = False\n reason = f'Python exception with error {str(e)}, at command \"{step}\" (line 1)'\n error_step_index = i\n step_results.append(dict(index=i, step=step, output=''))\n break\n\n # Change when successful\n tls_name = 'default_%d' % i\n\n if done and reward == 1.0:\n success = True\n\n result = {\n 'success': success,\n 'reason': reason,\n 'num_steps': len(steps),\n 'last_step': len(step_results),\n 'error_step_index': error_step_index,\n 'step_results': step_results,\n 'corrected_steps': corrected_step,\n }\n\n # This will reset all the problem status\n self._post(\"reset_problem\")\n if quick_check is True:\n return success\n return result\n\n def _run_sledgehammer(self, step, i, tls_name):\n # First try heuristics\n for heuristic in ['by auto', 'by simp', 'by blast', 'by fastforce', 'by force', 'by eval', 'by presburger', 'by sos', 'by arith', 'by linarith', 'by (auto simp: field_simps)', \"sledgehammer\"]:\n step_ = heuristic\n obs, reward, done, metadata, error = self._run_step(step_, i, tls_name) \n if error is None:\n if \"<hammer>\" not in obs:\n obs = '%s <hammer> %s' % (heuristic, obs)\n actual_step = obs.split(\"<hammer>\")[0].strip()\n self.logger.info(f\"Tried step: {step_}, success, replace step: [{step}] with step: [{actual_step}]\")\n return obs, reward, done, metadata, error\n else:\n if step_ == \"sledgehammer\":\n one_line_error = error.replace('\\n', ' ')\n self.logger.info(f\"Tried step: {step_} with error [{one_line_error}]\")\n if 'At command \"<malformed>\"' in one_line_error:\n error = \"Sledgehammer error (line 1): fail to finish the proof with sledgehammer\"\n return None, reward, done, metadata, error\n # Try sledgehammer\n # if error.replace('\\n', ' ').startswith(\"Step error: Outer syntax error (line 1): command expected\"):\n # error = \"Sledgehammer error (line 1): fail to finish the proof with sledgehammer\"\n return obs, reward, done, metadata, error\n\n def _run_step(self, step, i, tls_name):\n obs, reward, done, metadata = self.step_to_top_level_state(\n action=step,\n tls_name=tls_name,\n new_name='default_%d' % i\n )\n error = None\n if 'error:' in obs or 'Step error' in obs or 'Unknown error' in obs:\n error = obs\n return obs, reward, done, metadata, error\n\n def step_to_top_level_state(self, action, tls_name, new_name):\n # last_obs_string = self.stub.IsabelleCommand(server_pb2.IsaCommand(command=f\"<get state> {tls_name}\")).state\n obs_string = \"Step error\"\n try:\n obs_string = self._post(f\"<apply to top level state> {tls_name} <apply to top level state> {action} <apply to top level state> {new_name}\")\n # print(obs_string)\n except Exception as e:\n self.logger.info(\"***Something went wrong***\")\n self.logger.info(e)\n\n if \"error\" in obs_string:\n done = False\n else:\n done = self.is_finished(new_name)\n # done = True if (\"subgoal\" in last_obs_string and \"subgoal\" not in obs_string) else False\n return obs_string, self.reward(done), done, {}\n\n def reward(self, done):\n return 1 if done else 0\n\n def is_finished(self, name_of_tls):\n ret = self._post(f\"<is finished> {name_of_tls}\").strip()\n return ret.startswith(\"t\")\n \n def get_marker_statement(self, code):\n parsed = self._get_parsed_code(code)\n sl = []\n for code in parsed:\n code = code.strip()\n if code.startswith(\"lemma\") or code.startswith(\"theorem\") or code.startswith(\"fun\") or code.startswith(\"definition\"):\n sl.append(code)\n return sl[-1]\n\n \n def _post_process_error_msg(self, code, parsed_code, 
verified_result):\n old_code = copy(code)\n only_refresh_code = False\n if \"Timeout after\" in verified_result[\"reason\"]:\n verified_result[\"reason\"] = \\\n 'Step timeout error (line 1): the step takes more than 10 seconds to run. At command \"<cmd>\" (line 1)'\n if verified_result[\"success\"] is True:\n only_refresh_code = True\n elif re.search(r\"\\(line [0-9]+\\)\", verified_result[\"reason\"]) is None and \\\n re.search(r'At command \"(.?)+\"', verified_result[\"reason\"]) is None:\n self.logger.info(\"No line number or at command, skip...\")\n self.logger.info(\"The error is:\")\n self.logger.info(verified_result[\"reason\"])\n only_refresh_code = True\n \n matched_codes = []\n for ix, step in enumerate(verified_result[\"step_results\"]):\n step_code = step[\"step\"].strip()\n if step_code not in code:\n # This error is too complicated, I give up\n if len(step[\"output\"]) != 0:\n return verified_result, old_code, \"\".join(matched_codes), code\n else:\n if step_code.startswith(\"(*\"):\n start_index = code.index(\"(*\")\n self.logger.info(f\"Parsed code: {step_code}\")\n self.logger.info(f\"ori code: {code}\")\n for i in range(len(step_code)):\n if code[i+start_index] != step_code[i]:\n assert step_code[i] == \"?\"\n code = code[:i+start_index] + step_code[i] + code[i+start_index+1:]\n self.logger.info(f\"new code: {code}\")\n else:\n self.logger.info(f\"Parsed code: {step_code}\")\n self.logger.info(f\"ori code: {code}\")\n assert False, \"You should add the list!\"\n new_step = None\n if ix in verified_result[\"corrected_steps\"]:\n old_step, new_step = verified_result[\"corrected_steps\"][ix]\n assert old_step == step_code\n matched_code = code[:code.index(step_code) + len(step_code)]\n code = code[code.index(step_code) + len(step_code):]\n if new_step is not None:\n matched_code = matched_code.replace(step_code.strip(), new_step.strip())\n matched_codes.append(matched_code)\n \n correct_code = \"\".join(matched_codes)\n incorrect_code = code\n\n if not only_refresh_code:\n previous_code = \"\".join(matched_codes)\n line_number = previous_code.strip().count(\"\\n\") + 1\n\n error_msg = re.sub(r\"\\(line [0-9]+\\)\", f\"(line {line_number})\", verified_result[\"reason\"])\n error_msg = re.sub(r'At command \"(.?)+\"', f'At command \"{repr(step_code)}\"', error_msg)\n\n verified_result[\"reason\"] = error_msg\n \n new_code = \"\".join(matched_codes + [code])\n\n return verified_result, new_code, correct_code, incorrect_code\n \n def get_lemma_name(self, code):\n name = \"no_name\"\n try:\n if code.startswith('lemma'):\n name = re.findall(r\"lemma (.+):\", code)[0].strip()\n elif code.startswith('theorem'):\n name = re.findall(r\"theorem (.+):\", code)\n if len(name) == 0:\n name = \"theorem_with_no_name\"\n else:\n name = name[0].strip()\n elif code.startswith('fun') and not code.startswith('function'):\n name = re.findall(r\"fun (.+) ::\", code)[0].strip()\n elif code.startswith('function'):\n name = re.findall(r\"function (.+) ::\", code)[0].strip()\n elif code.startswith('definition'):\n name = re.findall(r\"definition (.+) ::\", code)[0].strip()\n else:\n assert False, f\"new code type: {code}\"\n except Exception as e:\n self.logger.info(f\"Error get lemma name, error: {e}, code: {code}\")\n return name\n \n def _post_process_skill_code(self, correct_partial_code):\n start_keyword = [\"lemma\", \"theorem\", \"definition\", \"fun\", \"end\"]\n \n parsed_code = self._get_parsed_code(correct_partial_code)\n all_codes = []\n current_code_set = []\n for code in parsed_code:\n 
if code.startswith(tuple(start_keyword)):\n if len(current_code_set) > 0:\n skill_code = \"\\n\".join(current_code_set)\n all_codes.append(skill_code.strip())\n current_code_set = [code]\n else:\n assert len(all_codes) == 0 or len(current_code_set) > 0\n if len(current_code_set) != 0:\n current_code_set.append(code)\n \n # remove empty code:\n tmp_code = []\n for code in all_codes:\n code = self._beautify(code, correct_partial_code)\n if len(code) == 0:\n continue\n tmp_code.append(code)\n all_codes = tmp_code\n\n # resolve dependence\n all_names = []\n for code in all_codes:\n all_names.append(self.get_lemma_name(code))\n \n name_and_codes = list(zip(all_names, all_codes))\n name_and_codes = sorted(name_and_codes, key=lambda x: len(x[0]), reverse=True)\n if len(name_and_codes) > 0:\n all_names, all_codes = list(zip(*name_and_codes))\n else:\n all_names, all_codes = [], []\n \n new_codes = []\n for ix, code in enumerate(all_codes):\n current_code = code\n escape_names = [all_names[ix]]\n while True:\n updated = False\n for jx, name in enumerate(all_names):\n if name in escape_names:\n continue\n if name in current_code:\n current_code = f\"{all_codes[jx]}\\n\\n{current_code}\"\n escape_names.append(name)\n updated = True\n if updated is False:\n break\n new_codes.append(current_code)\n \n return list(zip(all_codes, new_codes))\n\n def _beautify(self, ori_code, correct_partial_code):\n parsed_code = self._get_parsed_code(ori_code)\n if ori_code.startswith(\"lemma\") or ori_code.startswith(\"theorem\"):\n if len(parsed_code) <= 1:\n return \"\"\n else:\n return ori_code\n if parsed_code[0].strip() not in correct_partial_code:\n return ori_code\n\n formatted_code = correct_partial_code[correct_partial_code.index(parsed_code[0]):]\n matched_codes = []\n for ix, step_code in enumerate(parsed_code):\n step_code = step_code.strip()\n if step_code not in formatted_code:\n # This error is too complicated, I give up\n return ori_code\n matched_code = formatted_code[:formatted_code.index(step_code) + len(step_code)]\n formatted_code = formatted_code[formatted_code.index(step_code) + len(step_code):]\n matched_codes.append(matched_code)\n \n new_code = \"\".join(matched_codes)\n \n # remove all the comments\n # This regular expression pattern will find all comments in the Isabelle code\n pattern = re.compile(r\"\\(\\*(.*?)\\*\\)\", re.DOTALL)\n\n # Substitute found comments with an empty string\n new_code = re.sub(pattern, '', new_code).strip()\n new_code = '\\n'.join(line for line in new_code.splitlines() if line.strip())\n\n if len(self._get_parsed_code(new_code)) <= 1:\n return \"\"\n return new_code\n\n def _get_request(self, code, skill_codes):\n parsed = self._get_parsed_code(code)\n requests = []\n for line in parsed:\n if line.strip().startswith(\"lemma\"):\n requests.append(line)\n full_codes = [k[1] for k in skill_codes]\n full_code = \"\\n\\n\".join(full_codes)\n requests = list(filter(lambda x: x not in full_code, requests))\n return requests" }, { "identifier": "ActionAgent", "path": "lego_prover/agents/action.py", "snippet": "class ActionAgent:\n def __init__(\n self,\n logger=None,\n model_name=\"gpt-3.5-turbo\",\n temperature=0,\n request_timeout=120,\n ckpt_dir=\"ckpt\",\n ):\n self.logger = logger\n self.ckpt_dir = ckpt_dir\n U.f_mkdir(f\"{ckpt_dir}/action\")\n self.llm = LLMMixture(\n model_name=model_name,\n temperature=temperature,\n request_timeout=request_timeout,\n )\n\n # load decomposer examples:\n self.decomposer_examples = {}\n for file in 
os.listdir(\"data/decomposer_examples\"):\n with open(os.path.join(\"data/decomposer_examples\", file), \"r\") as f:\n text = f.read()\n self.decomposer_examples[file[:-4]] = text\n \n self.formalizer_examples = {}\n for file in os.listdir(\"data/formalizer_examples\"):\n with open(os.path.join(\"data/formalizer_examples\", file), \"r\") as f:\n text = f.read()\n self.formalizer_examples[file[:-4]] = text\n \n def retrieved_example_skills(self, retrieved_skills):\n random.shuffle(retrieved_skills)\n prompt_examples = []\n for ix, skills in enumerate(retrieved_skills):\n skill_code = skills[\"code\"]\n prompt_example = f\"\"\"###### useful skill {ix+1}: ######\n```isabelle\n{skill_code}\n```\n\"\"\"\n prompt_examples.append(prompt_example)\n \n example_programmes = \"\\n\\n\".join(prompt_examples)\n return example_programmes\n \n def decomposer(self, context):\n system_prompt_template = load_prompt(\"decomposer\")\n system_message = SystemMessage(content=system_prompt_template)\n\n human_prompt_template = load_prompt(\"decomposer_human\")\n human_prompt_template = HumanMessagePromptTemplate.from_template(human_prompt_template)\n\n # post-process in-context-learning examples\n decomposer_examples = copy(self.decomposer_examples)\n if context[\"problem_name\"] in decomposer_examples:\n decomposer_examples.pop(context[\"problem_name\"])\n icl_examples = random.sample(list(decomposer_examples.values()), 3)\n icl_examples = \"\\n\\n####################\\n\\n\".join(icl_examples)\n\n context[\"informal_statement\"] = context[\"informal_statement\"].replace(\"\\n\", ' ').strip()\n context[\"informal_proof\"] = context[\"informal_proof\"].replace(\"\\n\", \" \").strip()\n\n human_message = human_prompt_template.format(\n examples=icl_examples,\n informal_statement=context[\"informal_statement\"],\n informal_proof=context[\"informal_proof\"],\n formal_statement=context[\"formal_statement\"],\n )\n\n conversation = {\n \"sys0\": system_message.content,\n \"human0\": human_message.content,\n }\n\n self.logger.info(\n f\"****decomposer system message****\\n{system_message.content}\"\n )\n\n self.logger.info(\n f\"****decomposer human message****\\n{human_message.content}\"\n )\n\n n_retry = 3\n informal_proof = context[\"informal_proof\"]\n skill_requests = []\n while n_retry > 0:\n try:\n ai_message = self.llm([system_message, human_message], temperature=0)\n self.logger.info(\n f\"****decomposer ai message****\\n{ai_message.content}\"\n )\n conversation[f\"ai{3-n_retry}\"] = ai_message.content\n message = ai_message.content\n if \"####################\" in message:\n message = message[:message.index(\"####################\")]\n # Extracting Error Analysis content\n informal_proof = re.search(r'## Structured informal proof\\n(.*?)\\n\\n#', message, re.DOTALL).group(1).strip()\n\n # Extracting each skill request's name and its content\n skill_requests = re.findall(r\"```isabelle\\n(.*?)\\n```\", message, re.DOTALL)\n break\n except AssertionError as e:\n if \"query too long\" in str(e):\n self.logger.warn(str(e))\n break\n except Exception as e:\n self.logger.info(f\"Error occur in decomposer: {str(e)}\")\n n_retry -= 1\n examples = random.sample(list(decomposer_examples.values()), 3)\n examples = \"\\n\\n####################\\n\\n\".join(examples)\n human_message = human_prompt_template.format(\n examples=examples,\n informal_statement=context[\"informal_statement\"],\n informal_proof=context[\"informal_proof\"],\n formal_statement=context[\"formal_statement\"],\n )\n time.sleep(5)\n ret_request = 
[]\n for skill in skill_requests:\n if \"N/A\" in skill:\n continue\n ret_request.append(skill)\n\n if len(ret_request) > 5:\n self.logger.info(f\"skill request more than 5, with len {len(ret_request)}\")\n ret_request = random.sample(ret_request, 5)\n\n return informal_proof, ret_request, conversation\n\n def critic(self, context, code_last_round=None, error_last_round=None):\n system_prompt_template = load_prompt(\"critic_request\")\n system_prompt_template = SystemMessagePromptTemplate.from_template(system_prompt_template)\n system_message = system_prompt_template.format(examples=\"\")\n\n human_prompt_template = load_prompt(\"critic_request_human\")\n human_prompt_template = HumanMessagePromptTemplate.from_template(human_prompt_template)\n\n if code_last_round is None:\n code_last_round = \"No code from last round...\"\n else:\n code_last_round = code_last_round.split('\\n')\n new_code = []\n for ix, line in enumerate(code_last_round):\n line = f\"#{ix+1} \" + line\n new_code.append(line)\n code_last_round = \"\\n\".join(new_code)\n \n if error_last_round is None:\n error_last_round = \"No error from last round...\"\n\n human_message = human_prompt_template.format(\n code=code_last_round,\n error=error_last_round,\n )\n\n # self.logger.info(\n # f\"****critic agent system message****\\n{system_message.content}\"\n # )\n\n self.logger.info(\n f\"****critic agent human message****\\n{human_message.content}\"\n )\n\n n_retry = 3\n error_analysis = \"No error analysis...\"\n skill_requests = []\n while n_retry > 0:\n try:\n ai_message = self.llm([system_message, human_message])\n self.logger.info(\n f\"****critic agent ai message****\\n{ai_message.content}\"\n )\n message = ai_message.content\n # Extracting Error Analysis content\n error_analysis = re.search(r'# Error analysis:\\n(.*?)\\n\\n#', message, re.DOTALL).group(1).strip()\n\n # Extracting each skill request's name and its content\n skill_requests = re.findall(r'## Skill \\d+: ([\\w_]+)\\n```isabelle\\n(.*?)\\n```', message, re.DOTALL)\n break\n except AssertionError as e:\n if \"query too long\" in str(e):\n self.logger.warn(str(e))\n break\n except Exception as e:\n self.logger.info(f\"Error occur in auto_formal_pre: {str(e)}\")\n n_retry -= 1\n time.sleep(5)\n\n return error_analysis, skill_requests\n \n def render_formalizer_system_message(self):\n system_template = load_prompt(\"formalizer\")\n return SystemMessage(content=system_template)\n \n def render_formalizer_human_message(\n self,\n skills,\n context,\n informal_proof=None,\n n_example=3,\n ) -> HumanMessage:\n human_prompt_template = load_prompt(\"formalizer_human\")\n human_prompt_template = HumanMessagePromptTemplate.from_template(human_prompt_template)\n\n formalizer_examples = copy(self.formalizer_examples)\n if context[\"problem_name\"] in formalizer_examples:\n formalizer_examples.pop(context[\"problem_name\"])\n\n examples = random.sample(list(formalizer_examples.values()), n_example)\n examples = \"\\n\\n####################\\n\\n\".join(examples)\n context[\"informal_statement\"] = context[\"informal_statement\"].replace(\"\\n\", ' ').strip()\n context[\"informal_proof\"] = context[\"informal_proof\"].replace(\"\\n\", \" \").strip()\n\n skills = self.retrieved_example_skills(skills)\n \n human_message = human_prompt_template.format(\n skill_examples = skills,\n examples=examples,\n informal_statement=context[\"informal_statement\"],\n informal_proof=context[\"informal_proof\"] if informal_proof is None else informal_proof,\n 
formal_statement=context[\"formal_statement\"],\n )\n\n return human_message\n\n\n def render_human_message(\n self, \n context, \n code=None,\n error=None,\n error_analysis=None,\n informal_proof=None,\n ) -> HumanMessage:\n human_prompt_template = load_prompt(\"auto_formal2_human\")\n human_prompt_template = HumanMessagePromptTemplate.from_template(human_prompt_template)\n\n if code is None:\n code = \"No code from last round...\"\n else:\n code = code.split('\\n')\n new_code = []\n for ix, line in enumerate(code):\n line = f\"#{ix+1} \" + line\n new_code.append(line)\n code = \"\\n\".join(new_code)\n \n if error is None:\n error = \"No error from last round...\"\n if error_analysis is None:\n error_analysis = \"No analysis...\"\n\n human_message = human_prompt_template.format(\n informal_statement=context[\"informal_statement\"],\n informal_proof=context[\"informal_proof\"] if informal_proof is None else informal_proof,\n formal_statement=context[\"formal_statement\"],\n code_last_round=code,\n error_last_round=error,\n error_analysis=error_analysis,\n )\n\n return human_message\n\n def process_ai_message(self, message, context):\n assert isinstance(message, AIMessage)\n\n retry = 3\n error = None\n while retry > 0:\n try:\n code_pattern = re.compile(r\"```(?:[i|I]sabelle)(.*?)```\", re.DOTALL)\n text = message.content[message.content.index(\"# Formalized Code\"):]\n code = \"\\n\".join(code_pattern.findall(text)).strip()\n return code\n except Exception as e:\n retry -= 1\n error = e\n time.sleep(1)\n self.logger.info(f\"Error parsing action response (before program execution): {error}\")\n return False" }, { "identifier": "CurriculumAgent", "path": "lego_prover/agents/curriculum.py", "snippet": "class CurriculumAgent:\n def __init__(\n self,\n logger=None,\n ckpt_dir=\"ckpt\",\n resume=False,\n miniF2F_tasks : mp.Queue = None,\n curriculum_task_type : str = \"simple_curriculum\",\n curriculum_agent_lock = U.WithEmpty()\n ):\n self.logger=logger\n self.miniF2F_tasks = miniF2F_tasks\n self.curriculum_task_type = curriculum_task_type\n self.curriculum_agent_lock = curriculum_agent_lock\n self.ckpt_dir = ckpt_dir\n U.f_mkdir(f\"{ckpt_dir}/curriculum/vectordb\")\n if resume:\n self.logger.info(f\"Loading Curriculum Agent from {ckpt_dir}/curriculum\")\n self.sync_checkpoint()\n else:\n self.completed_tasks = []\n self.failed_tasks = []\n \n def sync_checkpoint(self,):\n if os.path.exists(f\"{self.ckpt_dir}/curriculum/completed_tasks.json\"):\n self.completed_tasks = U.load_json(f\"{self.ckpt_dir}/curriculum/completed_tasks.json\")\n if os.path.exists(f\"{self.ckpt_dir}/curriculum/failed_tasks.json\"):\n self.failed_tasks = U.load_json(f\"{self.ckpt_dir}/curriculum/failed_tasks.json\")\n\n @property\n def easy_to_hard_curriculum(self):\n result = []\n for name in os.listdir(\"data/full_data/valid\"):\n path = os.path.join(\"data/full_data/valid\", name)\n context = U.load_json(path)\n result.append((path, len(context[\"informal_proof\"])))\n result = sorted(result, key=lambda x: x[1])\n result = [x[0] for x in result]\n return result\n\n @property\n def progress(self):\n return len(self.completed_tasks)\n\n def propose_next_task(self, max_retries=5, idx=None):\n if self.curriculum_task_type == \"example\":\n filename = os.listdir(\"data/examples\")[self.progress]\n task = filename[:-5]\n context = load_context(problem_name=os.path.join(\"data/examples\", filename))\n return task, context\n elif self.curriculum_task_type == \"simple_curriculum\":\n assert idx is not None\n file_path = 
self.easy_to_hard_curriculum[idx]\n task = file_path\n context = load_context(file_path)\n return task, context\n elif self.curriculum_task_type == \"queue_curriculum\":\n while True:\n if self.miniF2F_tasks.qsize() == 0:\n return \"\", None\n file_path = self.miniF2F_tasks.get()\n context = load_context(file_path)\n if file_path not in self.completed_tasks:\n break\n return file_path, context\n else:\n raise NotImplementedError\n\n def get_task_retry_count(self, task):\n cnt = 0\n for t in self.failed_tasks:\n if t == task:\n cnt += 1\n return cnt\n\n def propose_next_manual_task(self):\n confirmed = False\n task = \"\"\n while not confirmed:\n task = input(\"Enter task: \")\n print(f\"Task: {task}\")\n confirmed = input(\"Confirm? (y/n)\").lower() in [\"y\", \"\"]\n context = load_context(task)\n return task, context\n\n def update_exploration_progress(self, info):\n with self.curriculum_agent_lock:\n self.sync_checkpoint()\n\n task = info[\"task\"]\n if info[\"success\"]:\n self.logger.info(f\"Completed task {task}.\")\n self.completed_tasks.append(task)\n else:\n self.logger.info(\n f\"Failed to complete task {task}. Skipping to next task.\"\n )\n self.failed_tasks.append(task)\n\n # clean up tasks and dump to disk\n self.clean_up_tasks()\n\n def clean_up_tasks(self):\n updated_completed_tasks = []\n # record repeated failed tasks\n updated_failed_tasks = self.failed_tasks\n # dedup but keep order\n for task in self.completed_tasks:\n if task not in updated_completed_tasks:\n updated_completed_tasks.append(task)\n\n # remove completed tasks from failed tasks\n for task in updated_completed_tasks:\n while task in updated_failed_tasks:\n updated_failed_tasks.remove(task)\n\n self.completed_tasks = updated_completed_tasks\n self.failed_tasks = updated_failed_tasks\n\n # dump to json\n U.dump_json(\n self.completed_tasks, f\"{self.ckpt_dir}/curriculum/completed_tasks.json\"\n )\n U.dump_json(self.failed_tasks, f\"{self.ckpt_dir}/curriculum/failed_tasks.json\")" }, { "identifier": "SkillManager", "path": "lego_prover/agents/skill.py", "snippet": "class SkillManager:\n def __init__(\n self,\n rank = None,\n logger = None,\n ckpt_dir=\"ckpt\",\n skill_manager_lock=U.WithEmpty(),\n chroma_bridge: ChromaBridge = None\n ):\n self.rank = rank\n self.logger = logger\n self.skill_manager_lock = skill_manager_lock\n self.chroma_bridge = chroma_bridge\n U.f_mkdir(f\"{ckpt_dir}/skill/code\")\n U.f_mkdir(f\"{ckpt_dir}/skill/history_problem\")\n U.f_mkdir(f\"{ckpt_dir}/skill/requests\")\n U.f_mkdir(f\"{ckpt_dir}/skill/description\")\n U.f_mkdir(f\"{ckpt_dir}/skill/vectordb\")\n self.ckpt_dir = ckpt_dir\n self.encoder = tiktoken.encoding_for_model(\"gpt-4\")\n with self.skill_manager_lock:\n self.sync_checkpoint()\n \n def sync_checkpoint(self):\n if os.path.exists(f\"{self.ckpt_dir}/skill/skills.json\"):\n self.skills = U.load_json(f\"{self.ckpt_dir}/skill/skills.json\")\n else:\n self.skills = {}\n if os.path.exists(f\"{self.ckpt_dir}/skill/codes.json\"):\n self.codes = U.load_json(f\"{self.ckpt_dir}/skill/codes.json\")\n else:\n self.codes = {}\n if os.path.exists(f\"{self.ckpt_dir}/skill/skill_request.json\"):\n self.skill_requests = U.load_json(f\"{self.ckpt_dir}/skill/skill_request.json\")\n else:\n self.skill_requests = {}\n \n def add_new_problem(self, problem_name, formal_statement):\n data = (\"problem_add_text\", {\n \"add_text\": formal_statement,\n \"problem_name\": problem_name,\n })\n output = self.chroma_bridge.run_cmd(data)\n assert output[\"error\"] is None, \"error is not None\"\n 
print(output[\"output\"])\n\n def add_new_request(self, problem_name, formal_statement, init_update_count=0):\n with self.skill_manager_lock:\n self.sync_checkpoint()\n\n exists_formal_statements = [value['formal_statement'] for value in self.skill_requests.values()]\n if len(get_close_matches(formal_statement, exists_formal_statements, n=1, cutoff=0.85)) != 0:\n return\n\n with self.skill_manager_lock:\n self.sync_checkpoint()\n request_name = f\"request_{len(self.skill_requests)}\"\n self.skill_requests[request_name] = {\n \"request_name\": request_name,\n \"problem_name\": problem_name,\n \"formal_statement\": formal_statement,\n \"update_count\": init_update_count,\n }\n \n\n data = (\"request_add_text\", {\n \"add_text\": formal_statement,\n \"request_name\": request_name,\n })\n \n assert self.chroma_bridge is not None\n output = self.chroma_bridge.run_cmd(data)\n if output[\"error\"] is None:\n # print(\"There are\", output[\"output\"], \"code\")\n assert output[\"output\"] == len(\n self.skill_requests\n ), (\"requestdb is not synced with skill_request.json, \"\n f\"there are {output['output']} in requestdb but {len(self.skill_requests)} in skill_request.json\")\n \n U.dump_text(\n formal_statement, f\"{self.ckpt_dir}/skill/requests/{request_name}.thy\"\n )\n U.dump_json(self.skill_requests, f\"{self.ckpt_dir}/skill/skill_request.json\")\n self.logger.info(f\"Added skill, marker:\\n ```isabelle\\n{formal_statement}```\\n\") \n\n def add_new_skill(self, skill_name, description, marker, full_code, origin=\"\", init_update_count=0):\n with self.skill_manager_lock:\n self.sync_checkpoint()\n\n exists_markers = [value['marker'] for value in self.skills.values()]\n if len(self.encoder.encode(marker)) > 650:\n return\n if len(get_close_matches(marker, exists_markers, n=1, cutoff=0.85)) != 0:\n return\n\n if not bool(re.match(\"^[a-zA-Z0-9_']+$\", skill_name)):\n skill_name = f\"skill_{len(self.skills)}\"\n\n skill_name = skill_name.lower().strip().replace(\" \", \"_\")\n if skill_name in self.skills:\n i = 2\n while f\"{skill_name}V{i}\" in self.skills:\n i += 1\n skill_name = f\"{skill_name}V{i}\"\n\n with self.skill_manager_lock:\n self.sync_checkpoint()\n\n self.skills[skill_name] = {\n \"skill_name\": skill_name,\n \"marker\": marker,\n \"description\": description,\n \"full_code\": full_code,\n \"origin\": origin,\n \"update_count\": init_update_count,\n }\n\n # add_text = f\"code: {marker}, skill: {skill_name}, description: {description},\"\n add_text = marker\n \n # use chroma bridge to add skill to the chromadb\n assert self.chroma_bridge is not None\n data = (\"skill_add_text\",{\n \"skill_name\": skill_name,\n \"add_text\": add_text,\n })\n output = self.chroma_bridge.run_cmd(data)\n if output[\"error\"] is None:\n assert output[\"output\"] == len(\n self.skills\n ), (\"vectordb is not synced with skill.json\"\n f\"there are {output['output']} in skilldb but {len(self.skills)} in skills.json\")\n \n U.dump_text(\n marker, f\"{self.ckpt_dir}/skill/code/{skill_name}.thy\"\n )\n U.dump_text(\n description,\n f\"{self.ckpt_dir}/skill/description/{skill_name}.txt\",\n )\n U.dump_json(self.skills, f\"{self.ckpt_dir}/skill/skills.json\")\n self.logger.info(f\"Added skill, marker:\\n ```isabelle\\n{marker}```\\nfull_code:\\nisabelle\\n{full_code}\\n\")\n\n def update_count(self, skill_name):\n with self.skill_manager_lock:\n self.sync_checkpoint()\n self.skills[skill_name][\"update_count\"] += 1\n U.dump_json(self.skills, f\"{self.ckpt_dir}/skill/skills.json\")\n \n def 
update_count_request(self, request_name):\n with self.skill_manager_lock:\n self.sync_checkpoint()\n self.skill_requests[request_name][\"update_count\"] += 1\n U.dump_json(self.skill_requests, f\"{self.ckpt_dir}/skill/skill_request.json\")\n\n def retrieve_skills(self, query, k):\n ret_skill = []\n k = min(len(self.skills), k)\n if k != 0:\n self.logger.info(f\"Skill Manager retrieving for {k} skills\")\n with self.skill_manager_lock:\n # query = f\"informal statement: {context['informal_statement']}, informal proof: {context['informal_proof']}, formal_statement: {context['formal_statement']}\"\n data = (\"skill_query\", {\"query\": query, \"k\": k})\n outputs = self.chroma_bridge.run_cmd(data)\n ret_skill_name = []\n if outputs[\"error\"] is None:\n ret_skill_name = outputs[\"output\"]\n self.sync_checkpoint()\n self.logger.info(\n f\"Skill Manager retrieved skills for query:\\n ```\\n\"\n f\"{query}\\n```\\n\"\n f\"{', '.join(ret_skill_name)}\"\n )\n\n for skill_name in ret_skill_name:\n retrieved_skill = {\n \"skill\": skill_name,\n \"description\": self.skills[skill_name][\"description\"],\n \"code\": self.skills[skill_name][\"full_code\"],\n \"marker\": self.skills[skill_name][\"marker\"],\n }\n ret_skill.append(retrieved_skill)\n return ret_skill\n\n def retrieve_skills_with_context(self, context):\n ret_skill = []\n\n k = min(len(self.skills), 6)\n if k != 0:\n self.logger.info(f\"Skill Manager retrieving for {k} skills\")\n with self.skill_manager_lock:\n query = context['formal_statement']\n data = (\"skill_query\", {\"query\": query, \"k\": k})\n outputs = self.chroma_bridge.run_cmd(data)\n ret_skill_name = []\n if outputs[\"error\"] is None:\n ret_skill_name = outputs[\"output\"]\n self.sync_checkpoint()\n self.logger.info(\n f\"Skill Manager retrieved skills for query:\\n ```\\n\"\n f\"{query}\\n```\\n\"\n f\"{', '.join(ret_skill_name)}\"\n )\n \n for skill_name in ret_skill_name:\n retrieved_skill = {\n \"skill\": skill_name,\n \"description\": self.skills[skill_name][\"description\"],\n \"code\": self.skills[skill_name][\"full_code\"],\n \"marker\": self.skills[skill_name][\"marker\"],\n }\n ret_skill.append(retrieved_skill)\n\n return ret_skill" } ]
import os
import random
import re
import time
import multiprocessing as mp
import tiktoken
import lego_prover.utils as U
import logging
from lego_prover.env.isa_bridge import IsabelleEnv
from .agents import ActionAgent
from .agents import CurriculumAgent
from .agents import SkillManager
from langchain.schema import HumanMessage
11559
class Prover: def __init__( self, rank: int = None, isabelle_path: str = None, server_port: int = 8000, model_name: str = "gpt-4", temperature: int = 0, action_agent_task_max_retries: int = 4, curriculum_task_type: str = "simple_curriculum", curriculum_agent_lock = U.WithEmpty(), skill_manager_lock = U.WithEmpty(), chroma_bridge = None, openai_api_request_timeout: int = 6000, ckpt_dir: str = "ckpt", resume: bool = False, miniF2F_tasks: mp.Queue = None, ): """ Initializes a new instance of the Prover class. Args: rank (int): The rank of the prover process. isabelle_path (str): The path to the Isabelle directory. server_port (int): The port number for the server. model_name (str): The name of the OpenAI model to use. temperature (int): The temperature for sampling the LLM. action_agent_task_max_retries (int): The maximum number of retries for an action agent task. curriculum_task_type (str): The type of curriculum task to use. curriculum_agent_lock: The lock for the curriculum agent. skill_manager_lock: The lock for the skill manager. chroma_bridge: The ChromaBridge object for controlling the keyboard and mouse. openai_api_request_timeout (int): The timeout for OpenAI API requests. ckpt_dir (str): The directory for saving checkpoints. resume (bool): Whether to resume from the checkpoint. miniF2F_tasks (mp.Queue): The queue for miniF2F tasks. """ # init env self.rank = rank self.logger = logging.getLogger(f'prover-{rank}') self.logger.info(f"lego_prover running in rank {rank}") self.model_name = model_name self.env = IsabelleEnv( logger=self.logger, isabelle_path=isabelle_path, server_port=server_port ) self.action_agent_model_name = model_name self.tokenizer_encoder = tiktoken.encoding_for_model( self.action_agent_model_name) self.ckpt_dir = ckpt_dir self.temperature = temperature # init agents self.action_agent = ActionAgent( logger=self.logger, model_name=model_name, temperature=temperature, request_timeout=openai_api_request_timeout, ckpt_dir=ckpt_dir, ) self.action_agent_task_max_retries = action_agent_task_max_retries
class Prover: def __init__( self, rank: int = None, isabelle_path: str = None, server_port: int = 8000, model_name: str = "gpt-4", temperature: int = 0, action_agent_task_max_retries: int = 4, curriculum_task_type: str = "simple_curriculum", curriculum_agent_lock = U.WithEmpty(), skill_manager_lock = U.WithEmpty(), chroma_bridge = None, openai_api_request_timeout: int = 6000, ckpt_dir: str = "ckpt", resume: bool = False, miniF2F_tasks: mp.Queue = None, ): """ Initializes a new instance of the Prover class. Args: rank (int): The rank of the prover process. isabelle_path (str): The path to the Isabelle directory. server_port (int): The port number for the server. model_name (str): The name of the OpenAI model to use. temperature (int): The temperature for sampling the LLM. action_agent_task_max_retries (int): The maximum number of retries for an action agent task. curriculum_task_type (str): The type of curriculum task to use. curriculum_agent_lock: The lock for the curriculum agent. skill_manager_lock: The lock for the skill manager. chroma_bridge: The ChromaBridge object for controlling the keyboard and mouse. openai_api_request_timeout (int): The timeout for OpenAI API requests. ckpt_dir (str): The directory for saving checkpoints. resume (bool): Whether to resume from the checkpoint. miniF2F_tasks (mp.Queue): The queue for miniF2F tasks. """ # init env self.rank = rank self.logger = logging.getLogger(f'prover-{rank}') self.logger.info(f"lego_prover running in rank {rank}") self.model_name = model_name self.env = IsabelleEnv( logger=self.logger, isabelle_path=isabelle_path, server_port=server_port ) self.action_agent_model_name = model_name self.tokenizer_encoder = tiktoken.encoding_for_model( self.action_agent_model_name) self.ckpt_dir = ckpt_dir self.temperature = temperature # init agents self.action_agent = ActionAgent( logger=self.logger, model_name=model_name, temperature=temperature, request_timeout=openai_api_request_timeout, ckpt_dir=ckpt_dir, ) self.action_agent_task_max_retries = action_agent_task_max_retries
self.curriculum_agent = CurriculumAgent(
2
2023-10-09 04:23:43+00:00
16k
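Read together, the fields of the LEGO-Prover row above form one next-line completion example: given the imports and the truncated `prover.py` (`cropped_code`), the expected continuation is `self.curriculum_agent = CurriculumAgent(`, and `gold_snippet_index = 2` appears to point at the `CurriculumAgent` entry of the retrieved `context` list. The helper below is a hypothetical sketch of how such a row could be turned into a prompt/target pair; the prompt layout is invented here, and the reading of `gold_snippet_index` is inferred from this sample rather than stated by the dump.

```python
from dataclasses import dataclass
from typing import List, Tuple

@dataclass
class Row:
    # Field names mirror the dump's columns; the dict keys used below follow the
    # context entries shown above ("identifier", "path", "snippet").
    import_statement: str
    context: List[dict]
    cropped_code: str
    next_line: str
    gold_snippet_index: int

def build_example(row: Row) -> Tuple[str, str]:
    # Hypothetical prompt layout: surface the retrieved definition that
    # gold_snippet_index (by my reading) points at, then the file prefix to continue.
    gold = row.context[row.gold_snippet_index]
    header = f"# {gold['identifier']} ({gold['path']})\n{gold['snippet']}\n\n"
    prompt = header + row.import_statement + "\n\n" + row.cropped_code + "\n"
    return prompt, row.next_line  # target: the held-out next line
```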
LiyaoTang/ERDA
models/build_models.py
[ { "identifier": "load_config", "path": "config/utils.py", "snippet": "def load_config(cfg_path=None, dataset_name=None, cfg_name=None, cfg_group=None, reload=True):\n # cfg from path\n if cfg_path is not None:\n update = None\n if os.path.isfile(cfg_path):\n # update on the default cfg\n from config.base import Base, Config\n update = Base(cfg_path)\n cfg_path = [update.dataset.lower(), 'default']\n else:\n # directly specified cfg\n cfg_path = cfg_path.replace('/', '.').split('.')\n cfg_path = cfg_path if cfg_path[0] == 'config' else ['config'] + cfg_path\n cfg_module = cfg_path[1]\n cfg_class = '.'.join(cfg_path[2:])\n mod = _import_module(cfg_module)\n if hasattr(mod, cfg_class):\n cfg = getattr(mod, cfg_class)\n else:\n cfg = load_config(dataset_name=cfg_path[1], cfg_name=cfg_class, reload=reload)\n\n if update is not None:\n cfg = Config(cfg) # avoid overriding\n cfg.update(update, exclude=[]) # full override with no exclude\n return cfg\n\n # setup dict\n cfg_name_dict = load_config.cfg_name_dict # dataset_name -> {cfg.name -> cfg.idx_name}\n cfg_module_dict = load_config.cfg_module_dict # dataset_name -> cfg_module\n\n if dataset_name is not None and dataset_name not in cfg_module_dict or reload:\n mod = _import_module(dataset_name)\n cfg_module_dict[dataset_name] = mod\n cfg_name_dict[dataset_name] = {}\n for i in dir(mod):\n if not is_config(i, mod=mod): # use the 'base' class imported in 'mod'\n continue\n cfg = getattr(mod, i)\n if cfg.name:\n cfg_name_dict[dataset_name][cfg.name] = cfg.idx_name\n\n # module/cfg from dataset/cfg name\n mod = cfg_module_dict[dataset_name]\n if cfg_name is not None:\n if cfg_name not in cfg_name_dict[dataset_name]:\n raise KeyError(f'no cfg_name = {cfg_name} in module {dataset_name}')\n idx_name = cfg_name_dict[dataset_name][cfg_name]\n return getattr(mod, idx_name)\n elif cfg_group is not None:\n if not hasattr(mod, cfg_group):\n raise KeyError(f'no cfg_group = {cfg_group} in module {dataset_name}')\n cfg_g = getattr(mod, cfg_group)\n if isinstance(cfg_g, type(mod.Base)) and cfg_g._store_dict:\n cfg_g = cfg_g._store_dict\n if not isinstance(cfg_g, (tuple, list, dict, set)):\n raise ValueError(f'cfg_group = {cfg_group} appears to be {cfg_g}, not of type (tuple, list, dict, set)')\n return cfg_g\n return mod" }, { "identifier": "log_config", "path": "config/utils.py", "snippet": "def log_config(config, title='', f_out=None, prefix='', base=None):\n if f_out is None:\n f_out = sys.stdout\n if base is None:\n root = os.path.join(os.getcwd(), os.path.dirname(__file__), '../')\n sys.path += [] if root in sys.path or os.path.realpath(root) in sys.path else [root]\n from config.base import Base as base\n\n print(f'\\n{prefix}<<< ======= {config._cls} ======= {title if title else config.name}', file=f_out)\n max_len = max([len(k) for k in dir(config) if not k.startswith('_')] + [0])\n for k in config.keys(): # dir would sort\n # if k.startswith('_') or _is_method(getattr(config, k)):\n # continue\n cur_attr = getattr(config, k)\n if isinstance(cur_attr, list) and len(str(cur_attr)) > 200: # overlong list\n cur_attr = '[' + f'\\n{prefix}\\t\\t'.join([''] + [str(s) for s in cur_attr]) + f'\\n{prefix}\\t]'\n\n print('\\t%s%s\\t= %s' % (prefix + k, ' ' * (max_len-len(k)), str(cur_attr)), file=f_out)\n if is_config(cur_attr, base=base):\n log_config(cur_attr, f_out=f_out, prefix=prefix+'\\t', base=base)\n print('\\n', file=f_out, flush=True)" }, { "identifier": "get_block_cfg", "path": "config/blocks.py", "snippet": "def get_block_cfg(block, 
raise_not_found=True, verbose=False):\n \"\"\"\n '__xxxx__' - special block for config use\n '{block_n}-{attr 1}_{attr 2}....': cfg class name - attrs, with multiple attr connected via \"_\"\n \"\"\"\n\n # from . import blocks\n block = block.split('-')\n blk_cls = block[0]\n attr = '-'.join(block[1:])\n\n if blk_cls.startswith('__') and blk_cls.endswith('__'):\n blk = __cfg__()\n elif blk_cls in globals():\n blk = globals()[blk_cls]()\n elif raise_not_found:\n raise KeyError(f'block not found: {blk_cls} - {attr}')\n else:\n return None\n \n if attr:\n blk.parse(attr)\n if blk._assert:\n blk._assert()\n\n # # get the default setting\n # blk = Block(blk_cls)\n # # update\n # blk_fn = getattr(blocks, blk_cls)\n # blk = blk_fn(blk, attr)\n if not blk.name:\n blk.name = blk_cls\n if not blk.attr:\n blk.attr = attr\n if verbose:\n log_config(blk)\n return blk" }, { "identifier": "print_dict", "path": "utils/logger.py", "snippet": "def print_dict(d, prefix='', except_k=[], fn=None, head=None, dict_type=(dict,), list_type=(list, tuple), expand_len=120):\n if head is not None:\n d = {head: d}\n for k, v in d.items():\n if k in except_k:\n continue\n if isinstance(d[k], dict_type):\n print(f'{prefix}{str(k)}:')\n print_dict(d[k], prefix=f'{prefix}\\t', except_k=except_k, fn=fn, expand_len=120)\n else:\n if fn:\n rst = None\n try:\n if isinstance(v, list_type):\n rst = v.__class__([fn(vv) for vv in v])\n else:\n rst = fn(v)\n except:\n pass\n v = rst if rst else v\n line = f'{prefix}{str(k)}\\t{str(v)}'\n if isinstance(v, list_type) and expand_len and len(str(line)) > expand_len: # overlong\n line_pre = f'{prefix}{str(k)}\\t' + ('[' if isinstance(v, list) else '(')\n line_post = f'\\n{prefix}\\t' + (']' if isinstance(v, list) else ')')\n if set(dict_type).issuperset(set([type(s) for s in v])): # all dict in list\n print(line_pre)\n for s in v[:-1]:\n print_dict(s, prefix=f'{prefix}\\t\\t')\n print(f'{prefix}\\t\\t,')\n print_dict(v[-1], prefix=f'{prefix}\\t\\t')\n line = line_post\n else:\n line = line_pre + f'\\n{prefix}\\t\\t'.join([''] + [str(s) for s in v]) + line_post\n\n print(line)" }, { "identifier": "resnet_multi_part_segmentation_head", "path": "models/heads/seg_head.py", "snippet": "def resnet_multi_part_segmentation_head(config,\n inputs,\n F,\n base_fdim,\n is_training,\n init='xavier',\n weight_decay=0,\n activation_fn='relu',\n bn=True,\n bn_momentum=0.98,\n bn_eps=1e-3):\n \"\"\"A head for multi-shape part segmentation with resnet backbone.\n\n Args:\n config: config file\n inputs: a dict contains all inputs\n F: all stage features\n base_fdim: the base feature dim\n is_training: True indicates training phase\n init: weight initialization method\n weight_decay: If > 0, add L2Loss weight decay multiplied by this float.\n activation_fn: Activation function\n bn: If True, add batch norm after convolution\n\n Returns:\n logits for all shapes with all parts [num_classes, num_points, num_parts_i]\n \"\"\"\n F_up = []\n with tf.variable_scope('resnet_multi_part_segmentation_head') as sc:\n fdim = base_fdim\n features = F[-1]\n\n features = nearest_upsample_block(4, inputs, features, 'nearest_upsample_0')\n features = tf.concat((features, F[3]), axis=1)\n features = conv1d_1x1(features, 8 * fdim, 'up_conv0', is_training=is_training, with_bias=False, init=init,\n weight_decay=weight_decay, activation_fn=activation_fn, bn=bn, bn_momentum=bn_momentum,\n bn_eps=bn_eps)\n F_up.append(features)\n\n features = nearest_upsample_block(3, inputs, features, 'nearest_upsample_1')\n features = 
tf.concat((features, F[2]), axis=1)\n features = conv1d_1x1(features, 4 * fdim, 'up_conv1', is_training=is_training, with_bias=False, init=init,\n weight_decay=weight_decay, activation_fn=activation_fn, bn=bn, bn_momentum=bn_momentum,\n bn_eps=bn_eps)\n F_up.append(features)\n\n features = nearest_upsample_block(2, inputs, features, 'nearest_upsample_2')\n features = tf.concat((features, F[1]), axis=1)\n features = conv1d_1x1(features, 2 * fdim, 'up_conv2', is_training=is_training, with_bias=False, init=init,\n weight_decay=weight_decay, activation_fn=activation_fn, bn=bn, bn_momentum=bn_momentum,\n bn_eps=bn_eps)\n F_up.append(features)\n\n features = nearest_upsample_block(1, inputs, features, 'nearest_upsample_3')\n features = tf.concat((features, F[0]), axis=1)\n features = conv1d_1x1(features, fdim, 'up_conv3', is_training=is_training, with_bias=False, init=init,\n weight_decay=weight_decay, activation_fn=activation_fn, bn=bn, bn_momentum=bn_momentum,\n bn_eps=bn_eps)\n F_up.append(features) # [BxN, d]\n F_up = list(reversed(F_up))\n\n if config.sep_head or config.arch_up:\n # build head with config.arch_out\n return F_up, None\n\n shape_heads = [] # [BxN, ...]\n shape_latents = []\n for i_shape in range(config.num_classes): # separate head for diff shape\n head = features\n head = conv1d_1x1(head, fdim, f'shape{i_shape}_head', is_training=is_training, with_bias=False, init=init,\n weight_decay=weight_decay, activation_fn=activation_fn, bn=bn, bn_momentum=bn_momentum,\n bn_eps=bn_eps)\n shape_latents += [head]\n\n head = conv1d_1x1(head, config.num_parts[i_shape], f'shape{i_shape}_pred', is_training=is_training,\n with_bias=True, init=init,\n weight_decay=weight_decay, activation_fn=None, bn=False)\n shape_heads.append(head)\n\n # select out points of each shape - different shape corresponds to different parts (point label)\n shape_label = inputs['super_labels'] # [B]\n logits_with_point_label = [()] * config.num_classes # [(B'xN - pred, B'xN - label), ...]\n for i_shape in range(config.num_classes):\n i_shape_inds = tf.where(tf.equal(shape_label, i_shape))\n logits_i = tf.gather_nd(shape_heads[i_shape], i_shape_inds)\n point_labels_i = tf.gather_nd(inputs['point_labels'], i_shape_inds)\n logits_with_point_label[i_shape] = (logits_i, point_labels_i)\n logits_all_shapes = shape_heads\n\n return F_up, (shape_latents, logits_with_point_label, logits_all_shapes)" }, { "identifier": "resnet_scene_segmentation_head", "path": "models/heads/seg_head.py", "snippet": "def resnet_scene_segmentation_head(config,\n inputs,\n F,\n base_fdim,\n is_training,\n init='xavier',\n weight_decay=0,\n activation_fn='relu',\n bn=True,\n bn_momentum=0.98,\n bn_eps=1e-3):\n \"\"\"A head for scene segmentation with resnet backbone.\n\n Args:\n config: config file\n inputs: a dict contains all inputs\n F: all stage features\n base_fdim: the base feature dim\n is_training: True indicates training phase\n init: weight initialization method\n weight_decay: If > 0, add L2Loss weight decay multiplied by this float.\n activation_fn: Activation function\n bn: If True, add batch norm after convolution\n\n Returns:\n prediction logits [num_points, num_classes]\n \"\"\"\n F_up = []\n with tf.variable_scope('resnet_scene_segmentation_head') as sc:\n fdim = base_fdim\n features = F[-1]\n\n features = nearest_upsample_block(4, inputs, features, 'nearest_upsample_0')\n features = tf.concat((features, F[3]), axis=1)\n features = conv1d_1x1(features, 8 * fdim, 'up_conv0', is_training=is_training, with_bias=False, init=init, # 
2^3 * fdim\n weight_decay=weight_decay, activation_fn=activation_fn, bn=bn, bn_momentum=bn_momentum,\n bn_eps=bn_eps)\n F_up.append(features)\n\n features = nearest_upsample_block(3, inputs, features, 'nearest_upsample_1')\n features = tf.concat((features, F[2]), axis=1)\n features = conv1d_1x1(features, 4 * fdim, 'up_conv1', is_training=is_training, with_bias=False, init=init,\n weight_decay=weight_decay, activation_fn=activation_fn, bn=bn, bn_momentum=bn_momentum,\n bn_eps=bn_eps)\n F_up.append(features)\n\n features = nearest_upsample_block(2, inputs, features, 'nearest_upsample_2')\n features = tf.concat((features, F[1]), axis=1)\n features = conv1d_1x1(features, 2 * fdim, 'up_conv2', is_training=is_training, with_bias=False, init=init,\n weight_decay=weight_decay, activation_fn=activation_fn, bn=bn, bn_momentum=bn_momentum,\n bn_eps=bn_eps)\n F_up.append(features)\n\n features = nearest_upsample_block(1, inputs, features, 'nearest_upsample_3')\n features = tf.concat((features, F[0]), axis=1)\n features = conv1d_1x1(features, fdim, 'up_conv3', is_training=is_training, with_bias=False, init=init,\n weight_decay=weight_decay, activation_fn=activation_fn, bn=bn, bn_momentum=bn_momentum,\n bn_eps=bn_eps)\n\n F_up.append(features)\n F_up = list(reversed(F_up))\n\n if config.sep_head or config.arch_up:\n # build head with config.arch_out\n return F_up, None\n\n features = conv1d_1x1(features, fdim, 'segmentation_head', is_training=is_training, with_bias=False, init=init,\n weight_decay=weight_decay, activation_fn=activation_fn, bn=bn, bn_momentum=bn_momentum,\n bn_eps=bn_eps)\n logits = conv1d_1x1(features, config.num_classes, 'segmentation_pred', is_training=is_training, with_bias=True,\n init=init, weight_decay=weight_decay, activation_fn=None, bn=False)\n return F_up, (features, logits)" }, { "identifier": "resnet_classification_head", "path": "models/heads/cls_head.py", "snippet": "def resnet_classification_head(config,\n inputs,\n features,\n base_fdim,\n is_training,\n pooling='avg',\n init='xavier',\n weight_decay=0,\n activation_fn='relu',\n bn=True,\n bn_momentum=0.98,\n bn_eps=1e-3):\n \"\"\"A head for shape classification with resnet backbone.\n\n Args:\n config: config file\n inputs: a dict contains all inputs\n features: input features\n base_fdim: the base feature dim\n is_training: True indicates training phase\n pooling: global pooling type, avg or max\n init: weight initialization method\n weight_decay: If > 0, add L2Loss weight decay multiplied by this float.\n activation_fn: Activation function\n bn: If True, add batch norm after convolution\n\n Returns:\n prediction logits [batch_size, num_classes]\n \"\"\"\n with tf.variable_scope('resnet_classification_head') as sc:\n fdim = base_fdim\n if pooling == 'avg':\n features = global_average_block(inputs, features, 'global_avg_pool')\n elif pooling == 'max':\n features = global_max_block(inputs, features, 'global_max_pool')\n else:\n raise NotImplementedError(f\"{pooling} not supported in resnet_classification_head\")\n\n features = conv1d_1x1(features, 16 * fdim, 'fc1', is_training=is_training, with_bias=False, init=init,\n weight_decay=weight_decay, activation_fn=activation_fn, bn=bn, bn_momentum=bn_momentum,\n bn_eps=bn_eps)\n features = dropout(features, keep_prob=0.5, is_training=is_training, scope='dp1')\n\n features = conv1d_1x1(features, 8 * fdim, 'fc2', is_training=is_training, with_bias=False, init=init,\n weight_decay=weight_decay, activation_fn=activation_fn, bn=bn, bn_momentum=bn_momentum,\n bn_eps=bn_eps)\n 
features = dropout(features, keep_prob=0.5, is_training=is_training, scope='dp2')\n\n features = conv1d_1x1(features, 4 * fdim, 'fc3', is_training=is_training, with_bias=False, init=init,\n weight_decay=weight_decay, activation_fn=activation_fn, bn=bn, bn_momentum=bn_momentum,\n bn_eps=bn_eps)\n features = dropout(features, keep_prob=0.5, is_training=is_training, scope='dp3')\n\n logits = conv1d_1x1(features, config.num_classes, 'logit', is_training=is_training, with_bias=True, init=init,\n weight_decay=weight_decay, activation_fn=None, bn=False)\n return logits" }, { "identifier": "resnet_backbone", "path": "models/backbone/resnet.py", "snippet": "def resnet_backbone(config,\n inputs,\n features,\n base_radius,\n base_fdim,\n bottleneck_ratio,\n depth,\n is_training,\n init='xavier',\n weight_decay=0,\n activation_fn='relu',\n bn=True,\n bn_momentum=0.98,\n bn_eps=1e-3):\n \"\"\"Resnet Backbone\n\n Args:\n config: config file\n inputs: a dict contains all inputs\n features: input features\n base_radius: the first ball query radius\n base_fdim: the base feature dim\n bottleneck_ratio: bottleneck_ratio\n depth: num of bottleneck in a stage\n is_training: True indicates training phase\n init: weight initialization method\n weight_decay: If > 0, add L2Loss weight decay multiplied by this float.\n activation_fn: Activation function\n bn: If True, add batch norm after convolution\n\n Returns:\n A list of all stage features\n \"\"\"\n with tf.variable_scope('resnet_backbone') as sc:\n fdim = base_fdim\n radius = base_radius\n layer_idx = 0\n F = []\n features = conv1d_1x1(features, fdim, 'res1_input_conv', is_training=is_training, with_bias=False, init=init,\n weight_decay=weight_decay, activation_fn=activation_fn, bn=bn, bn_momentum=bn_momentum,\n bn_eps=bn_eps)\n\n features = simple_block(layer_idx, config, inputs, features, 'res1_simple_block',\n radius=radius, out_fdim=fdim, is_training=is_training,\n init=init, weight_decay=weight_decay, activation_fn=activation_fn, bn=bn,\n bn_momentum=bn_momentum, bn_eps=bn_eps)\n for i in range(depth):\n features = bottleneck(layer_idx, config, inputs, features, f'res1_bottleneck{i}',\n radius=radius, out_fdim=2 * fdim, bottleneck_ratio=bottleneck_ratio,\n is_training=is_training,\n init=init, weight_decay=weight_decay, activation_fn=activation_fn, bn=bn,\n bn_momentum=bn_momentum,\n bn_eps=bn_eps)\n F += [features]\n layer_idx += 1\n features = strided_bottleneck(layer_idx - 1, config, inputs, features, 'res2_strided_bottleneck',\n radius=radius, out_fdim=4 * fdim, bottleneck_ratio=bottleneck_ratio,\n is_training=is_training,\n init=init, weight_decay=weight_decay, activation_fn=activation_fn, bn=bn,\n bn_momentum=bn_momentum,\n bn_eps=bn_eps)\n for i in range(depth):\n features = bottleneck(layer_idx, config, inputs, features, f'res2_bottleneck{i}',\n radius=2 * radius, out_fdim=4 * fdim, bottleneck_ratio=bottleneck_ratio,\n is_training=is_training,\n init=init, weight_decay=weight_decay, activation_fn=activation_fn, bn=bn,\n bn_momentum=bn_momentum,\n bn_eps=bn_eps)\n F += [features]\n layer_idx += 1\n features = strided_bottleneck(layer_idx - 1, config, inputs, features, 'res3_strided_bottleneck',\n radius=2 * radius, out_fdim=8 * fdim, bottleneck_ratio=bottleneck_ratio,\n is_training=is_training,\n init=init, weight_decay=weight_decay, activation_fn=activation_fn, bn=bn,\n bn_momentum=bn_momentum,\n bn_eps=bn_eps)\n for i in range(depth):\n features = bottleneck(layer_idx, config, inputs, features, f'res3_bottleneck{i}',\n radius=4 * radius, 
out_fdim=8 * fdim, bottleneck_ratio=bottleneck_ratio,\n is_training=is_training,\n init=init, weight_decay=weight_decay, activation_fn=activation_fn, bn=bn,\n bn_momentum=bn_momentum,\n bn_eps=bn_eps)\n F += [features]\n layer_idx += 1\n features = strided_bottleneck(layer_idx - 1, config, inputs, features, 'res4_strided_bottleneck',\n radius=4 * radius, out_fdim=16 * fdim, bottleneck_ratio=bottleneck_ratio,\n is_training=is_training,\n init=init, weight_decay=weight_decay, activation_fn=activation_fn, bn=bn,\n bn_momentum=bn_momentum,\n bn_eps=bn_eps)\n for i in range(depth):\n features = bottleneck(layer_idx, config, inputs, features, f'res4_bottleneck{i}',\n radius=8 * radius, out_fdim=16 * fdim, bottleneck_ratio=bottleneck_ratio,\n is_training=is_training,\n init=init, weight_decay=weight_decay, activation_fn=activation_fn, bn=bn,\n bn_momentum=bn_momentum,\n bn_eps=bn_eps)\n F += [features]\n layer_idx += 1\n features = strided_bottleneck(layer_idx - 1, config, inputs, features, 'res5_strided_bottleneck',\n radius=8 * radius, out_fdim=32 * fdim, bottleneck_ratio=bottleneck_ratio,\n is_training=is_training,\n init=init, weight_decay=weight_decay, activation_fn=activation_fn, bn=bn,\n bn_momentum=bn_momentum,\n bn_eps=bn_eps)\n for i in range(depth):\n features = bottleneck(layer_idx, config, inputs, features, f'res5_bottleneck{i}',\n radius=16 * radius, out_fdim=32 * fdim, bottleneck_ratio=bottleneck_ratio,\n is_training=is_training,\n init=init, weight_decay=weight_decay, activation_fn=activation_fn, bn=bn,\n bn_momentum=bn_momentum,\n bn_eps=bn_eps)\n\n # layer_idx = 4, out_fdim = 2 ** (layer_idx+1) * fdim, radius [stride/] = 2**(layer_idx-1) / 2**layer_idx\n if config.num_layers != 5:\n assert config.num_layers > 5, f'unsupported num_layers = {config.num_layers} in resnet backbone'\n for nl in range(6, config.num_layers + 1):\n F += [features]\n layer_idx = nl - 1\n features = strided_bottleneck(layer_idx - 1, config, inputs, features, f'res{nl}_strided_bottleneck',\n radius=(layer_idx - 1) ** 2 * radius, out_fdim=2 ** nl * fdim, bottleneck_ratio=bottleneck_ratio,\n is_training=is_training,\n init=init, weight_decay=weight_decay, activation_fn=activation_fn, bn=bn,\n bn_momentum=bn_momentum,\n bn_eps=bn_eps)\n for i in range(depth):\n features = bottleneck(layer_idx, config, inputs, features, f'res{nl}_bottleneck{i}',\n radius=layer_idx ** 2 * radius, out_fdim=2 ** nl * fdim, bottleneck_ratio=bottleneck_ratio,\n is_training=is_training,\n init=init, weight_decay=weight_decay, activation_fn=activation_fn, bn=bn,\n bn_momentum=bn_momentum,\n bn_eps=bn_eps)\n F += [features]\n\n return F" }, { "identifier": "get_block_ops", "path": "models/blocks.py", "snippet": "def get_block_ops(block_n, raise_not_found=True):\n\n # resnet bottleneck w/o strided\n if block_n.startswith('resnetb'):\n block_ops = bottleneck\n\n # mlps\n elif block_n in ['unary', 'linear']:\n block_ops = unary_block\n\n # simple aggregation\n elif block_n.startswith('agg') or block_n.startswith('pool') or block_n in ['distconv']:\n block_ops = agg_block\n\n # sampling\n elif 'sample' in block_n:\n block_ops = globals()[f'{block_n}_block']\n\n # lfa\n elif block_n == 'lfa':\n block_ops = lfa_block\n\n elif block_n.startswith('attention'):\n block_ops = attention_block\n\n # raise or skip\n elif raise_not_found:\n raise NotImplementedError(f'not supported block_n = {block_n}')\n else:\n block_ops = None\n return block_ops" }, { "identifier": "apply_block_ops", "path": "models/blocks.py", "snippet": "@tf_scope\ndef 
apply_block_ops(features, d_out, inputs, stage_n, stage_i, block_cfg, config, is_training):\n block_ops = get_block_ops(block_cfg.name)\n features = block_ops(features, d_out, inputs, stage_n, stage_i, block_cfg, config, is_training)\n return features" }, { "identifier": "apply_head_ops", "path": "models/head.py", "snippet": "def apply_head_ops(inputs, head_cfg, config, is_training):\n head_ops = get_head_ops(head_cfg.head_n)\n rst = head_ops(inputs, head_cfg, config, is_training)\n return rst" }, { "identifier": "tf_scope", "path": "models/utils.py", "snippet": "def tf_scope(func):\n \"\"\" decorator: automatically wrap a var scope \"\"\"\n def scopped_func(*args, name=None, reuse=None, **kwargs):\n if name is not None and not reuse:\n with tf.variable_scope(name):\n return func(*args, **kwargs)\n elif name is not None and reuse: # variable reuse, naming ops as desired\n with tf.variable_scope(reuse, auxiliary_name_scope=False, reuse=True):\n with tf.name_scope(name):\n return func(*args, **kwargs)\n elif reuse: # variable reuse + naming ops as is re-enter the scope\n with tf.variable_scope(reuse, reuse=True):\n return func(*args, **kwargs)\n else:\n return func(*args, **kwargs)\n return scopped_func" } ]
import_statement:
import os, re, sys, copy, warnings
import tensorflow as tf
from collections import defaultdict
from config import log_config, load_config, get_block_cfg
from utils.logger import print_dict
from .heads import resnet_classification_head, resnet_scene_segmentation_head, resnet_multi_part_segmentation_head
from .backbone import resnet_backbone
from .blocks import get_block_ops, apply_block_ops
from .head import apply_head_ops
from .utils import tf_scope
from .basic_operators import *
from ops import TF_OPS
token_num: 10,804
# main_n: loss_dict.pop(main_n), **loss_dict, } head_dict['loss'] = loss_dict return loss_dict class SceneSegModel(Model): def __init__(self, flat_inputs, is_training, config, scope=None, verbose=True): self.config = config self.is_training = is_training self.scope = scope self.verbose = verbose with tf.variable_scope('inputs'): self.inputs = self.get_inputs(flat_inputs) self.num_layers = config.num_layers self.labels = self.inputs['point_labels'] self.down_list = [{'p_sample': None, 'f_sample': None, 'p_out': None, 'f_out': None} for i in range(self.num_layers)] self.up_list = [{'p_sample': None, 'f_sample': None, 'p_out': None, 'f_out': None} for i in range(self.num_layers)] self.stage_list = self.inputs['stage_list'] = {'down': self.down_list, 'up': self.up_list} self.head_dict = self.inputs['head_dict'] = {'loss': {}, 'result': {}, 'config': {}} for i, p in enumerate(self.inputs['points']): # fill points self.down_list[i]['p_out'] = p # up 0 = the most upsampled, num_layers-1 the upsampled pt from the most downsampled self.up_list[i]['p_out'] = p if i < self.num_layers - 1 else None if config.dense_by_conv: dense_layer.config = config with tf.variable_scope('model'): fdim = config.first_features_dim r = config.first_subsampling_dl * config.density_parameter features = self.inputs['features'] F = resnet_backbone(config, self.inputs, features, base_radius=r, base_fdim=fdim, bottleneck_ratio=config.bottleneck_ratio, depth=config.depth, is_training=is_training, init=config.init, weight_decay=config.weight_decay, activation_fn=config.activation_fn, bn=True, bn_momentum=config.bn_momentum, bn_eps=config.bn_eps) F_up, head = resnet_scene_segmentation_head(config, self.inputs, F, base_fdim=fdim, is_training=is_training, init=config.init, weight_decay=config.weight_decay, activation_fn=config.activation_fn, bn=True, bn_momentum=config.bn_momentum, bn_eps=config.bn_eps) for i, p in enumerate(self.inputs['points']): # fill features self.down_list[i]['f_out'] = F[i] # F_up reversed - 0 = the most upsampled, num_layers-1 the upsampled pt from the most downsampled self.up_list[i]['f_out'] = F_up[i] if i < len(F_up) else None self.up_list[-1] = self.down_list[-1] # align the most-downsampled layer if head is not None: latent, logits = head self.up_list[0]['latent'] = latent self.up_list[0]['logits'] = logits self.head_dict = self.build_head(self.config.arch_out, verbose=verbose) self.loss_dict = self.build_loss(scope) return class ModelBuilder(Model): def __init__(self, flat_inputs, is_training, config, scope=None, verbose=True): self.config = config self.is_training = is_training self.scope = scope # variable scope - potential sharing across devices (e.g. gpus) self.verbose = verbose with tf.variable_scope('inputs'): self.inputs = self.get_inputs(flat_inputs) self.num_layers = config.num_layers self.labels = self.inputs['point_labels'] with tf.variable_scope('model'): self.head_dict = self.build_model_plain_split() self.loss_dict = self.build_loss(scope=scope) return def build_model_plain_split(self): """ detect down-/up-sample via ops => architecture = [ops, ...] 
""" config = self.config self.down_list = [{'p_sample': None, 'f_sample': None, 'p_out': None, 'f_out': None} for i in range(self.num_layers)] self.up_list = [{'p_sample': None, 'f_sample': None, 'p_out': None, 'f_out': None} for i in range(self.num_layers)] if self.num_layers > 0 else self.down_list self.stage_list = {'down': self.down_list, 'up': self.up_list} self.head_dict = {'loss': {}, 'result': {}, 'config': {}} inputs = self.inputs inputs['stage_list'] = self.stage_list inputs['head_dict'] = self.head_dict # split arch: input -> main -> output if '__input__' in config.architecture and '__output__' in config.architecture: arch_in = config.architecture[:config.architecture.index('__input__')] arch_main = config.architecture[len(arch_in) + 1:config.architecture.index('__output__')] arch_out = config.architecture[config.architecture.index('__output__') + 1:] else: arch_in = config.arch_in arch_main = config.arch_main arch_out = config.arch_out assert len(arch_in) and len(arch_out), f'invalid split of architecture {config.architecture}' arch_in = [get_block_cfg(blk) if isinstance(blk, str) else blk for blk in arch_in] arch_main = [get_block_cfg(blk) if isinstance(blk, str) else blk for blk in arch_main] arch_out = [load_config(dataset_name='head', cfg_name=a) for a in arch_out] # arch input features = inputs['features'] self.prepare_points('', 0, inputs, config) arch_in_dims = config.arch_in_dims if config.arch_in_dims else [config.first_features_dim] * len(arch_in) if self.verbose: print(f'\n\n==== inputs') print_dict(inputs, prefix='\t', except_k=['stage_list']) print('\n\n==== arch input') for block_i, (block_cfg, d_out) in enumerate(zip(arch_in, arch_in_dims)): with tf.variable_scope(f'input/{block_cfg.name}_{block_i}'):
if tf.__version__.split('.')[0] == '2': tf = tf.compat.v1 tf.disable_v2_behavior() BASE_DIR = os.path.dirname(os.path.abspath(__file__)) ROOT_DIR = os.path.dirname(BASE_DIR) sys.path.insert(0, ROOT_DIR) class Model(object): def get_inputs(self, inputs): config = self.config if isinstance(inputs, dict): pass else: flat_inputs = inputs self.inputs = dict() self.inputs['points'] = flat_inputs[:config.num_layers] self.inputs['neighbors'] = flat_inputs[config.num_layers:2 * config.num_layers] self.inputs['pools'] = flat_inputs[2 * config.num_layers:3 * config.num_layers] self.inputs['upsamples'] = flat_inputs[3 * config.num_layers:4 * config.num_layers] ind = 4 * config.num_layers self.inputs['features'] = flat_inputs[ind] ind += 1 self.inputs['batch_weights'] = flat_inputs[ind] ind += 1 self.inputs['in_batches'] = flat_inputs[ind] ind += 1 self.inputs['out_batches'] = flat_inputs[ind] ind += 1 self.inputs['point_labels'] = flat_inputs[ind] ind += 1 self.inputs['augment_scales'] = flat_inputs[ind] ind += 1 self.inputs['augment_rotations'] = flat_inputs[ind] ind += 1 self.inputs['point_inds'] = flat_inputs[ind] ind += 1 self.inputs['cloud_inds'] = flat_inputs[ind] inputs = self.inputs for k in ['points', 'neighbors', 'pools', 'upsamples']: inputs[k] = [i if i is not None and i.shape.as_list()[0] != 0 else None for i in inputs[k]] inputs['sample_idx'] = { 'down': inputs['pools'], 'up': inputs['upsamples'] } if 'batches_len' in inputs: if 'batches_stack' not in inputs: inputs['batches_stack'] = [inputs['in_batches']] + [None] * (config.num_layers - 2) + [inputs['out_batches']] if 'batches_ind' not in inputs: inputs['batches_ind'] = [inputs['in_batch_inds']] + [None] * (config.num_layers - 1) if '_glb' not in inputs: inputs['_glb'] = {} # per-model/device global storage # inputs['assert_ops'] = [] return inputs def get_result(self): # keys=['logits', 'probs', 'labels'] # head_rst = {h: {k: d[k] for k in keys if k in d} for h, d in self.head_dict['result'].items()} head_rst = self.head_dict['result'] rst = { # {head/task: {probs, labels}, ..., 'inputs': input related} **head_rst, 'inputs': { 'point_inds': self.inputs['point_inds'], 'cloud_inds': self.inputs['cloud_inds'], } } for k in ['batches_len']: if k in self.inputs: rst['inputs'][k] = self.inputs[k] return rst def get_loss(self): return self.loss_dict """ TODO: to check - multiple keys indexing the inputs['point_labels'] should be having the same id in rst - ensure only one tensor passed from gpu to cpu <= """ @tf_scope def build_backbone(self, features, block_list, verbose=True): # building backbone blocks inputs = self.inputs config = self.config num_layers = config.num_layers def is_new_stage(blk): if any([k in blk for k in ['pool', 'strided']]): return 'down' elif any([k in blk for k in ['upsample']]): return 'up' else: return '' if 'stage_list' not in inputs: down_list = [{'p_sample': None, 'f_sample': None, 'p_out': None, 'f_out': None} for i in range(num_layers)] up_list = [{'p_sample': None, 'f_sample': None, 'p_out': None, 'f_out': None} for i in range(num_layers)] if num_layers > 0 else down_list stage_list = {'down': down_list, 'up': up_list} else: stage_list = inputs['stage_list'] down_list, up_list = stage_list['down'], stage_list['up'] inputs['stage_list'] = stage_list # backbone - init setting stage_i = 0 block_i = 0 stage_sc = 'down' F_list = down_list F_list[stage_i]['p_sample'] = inputs['points'][stage_i] F_list[stage_i]['f_sample'] = features d_out = config.architecture_dims[0] if verbose: print(f'\n\n==== 
{stage_sc}_{stage_i} - arch main') for block_cfg in block_list: block_n = block_cfg.name stage_n = is_new_stage(block_n) # change stage - indexing the stage after down/up-sampling ops if stage_n: if verbose: print('---- pts & features') print_dict(F_list[stage_i], prefix='\t') # update if stage_n == 'down': stage_i += 1 elif stage_n == 'up': stage_i -= 1 else: raise NotImplementedError(f'non supported stage name {stage_n}') # prepare block_i = 0 stage_sc = stage_n F_list = stage_list[stage_n] d_out = config.architecture_dims[stage_i] kr = config.kr_search[stage_i] self.prepare_points(stage_n, stage_i, inputs, config, name=f'{stage_sc}_{stage_i}') if verbose: print(f'\n\n==== {stage_sc}_{stage_i} - arch main') print_dict({k: v[stage_i] for k, v in inputs.items() if isinstance(v, tuple)}, prefix='\t') print(f'\td_out = {d_out}; kr = {kr}\n') if verbose: log_config(block_cfg) # special block if block_n.startswith('__') and block_n.endswith('__'): if block_n == '__up__': block_i = 0 stage_sc = 'up' F_list = up_list F_list[stage_i]['p_sample'] = inputs['points'][stage_i] F_list[stage_i]['f_sample'] = features else: raise ValueError(f'not supported special block {block_n}') # block ops else: with tf.variable_scope(f'{stage_sc}_{stage_i}/{block_n}_{block_i}'): block_ops = get_block_ops(block_n) features = block_ops(features, d_out, inputs, stage_n, stage_i, block_cfg, config, self.is_training) block_i += 1 if verbose: print(f'{block_n}_{block_i}\t{features}') # save the sampled pt/feature (1st block to sample the p_in/f_in of a stage) # NOTE update of inputs done in the ops - e.g. changing pt dyanmically based on feature & spatial sampling in inputs if stage_n: F_list[stage_i]['p_sample'] = inputs['points'][stage_i] F_list[stage_i]['f_sample'] = features # save as last block F_list[stage_i]['p_out'] = inputs['points'][stage_i] F_list[stage_i]['f_out'] = features # align most downsampled stage in up-down? 
if all(v == None for k, v in up_list[-1].items()): up_list[-1] = down_list[-1] if verbose: print('---- pts & features') print_dict(F_list[stage_i], prefix='\t') print_dict({'\nstage list =': stage_list}) return stage_list @tf_scope def prepare_points(self, stage_n, stage_i, inputs, config): # fixed sampling & searching on points - preparing inputs for next stage # (may otherwise be specified as block) stage_list = inputs['stage_list'] assert stage_n in ['up', 'down', ''], f'should not invoke prepare_points with stage_n=\'{stage_n}\'' # if config.debug: # print_dict(inputs, head=f'{stage_n}-{stage_i}') # print(stage_n == 'down' and inputs['points'][stage_i] is None and config.sample in TF_OPS.fix_sample) # print(stage_n == 'down' and inputs['neighbors'][stage_i] is None and config.search in TF_OPS.fix_search) # print(stage_n == 'down' and inputs['sample_idx']['down'][stage_i] is None and config.search in TF_OPS.fix_search) # print(stage_n == 'up' and inputs['sample_idx']['up'][stage_i] is None and config.search in TF_OPS.fix_search) # downsampling if stage_n == 'down' and inputs['points'][stage_i] is None and config.sample in TF_OPS.fix_sample: stage_last = stage_i - 1 # last downsampled stage # stage_last = len([i for i in inputs['points'] if i is not None]) points = stage_list['down'][stage_last]['p_out'] batches_len = inputs['batches_len'][stage_last] if 'batches_len' in inputs else None r = config.r_sample[stage_last] rst = TF_OPS.tf_fix_sample(points, r, config.sample, batches_len, verbose=False, name=config.sample) if 'batches_len' in inputs: inputs['points'][stage_i], inputs['batches_len'][stage_i] = rst else: inputs['points'][stage_i] = rst # neighborhood search if inputs['neighbors'][stage_i] is None and config.search in TF_OPS.fix_search: points = inputs['points'][stage_i] # current stage batches_len = inputs['batches_len'][stage_i] if 'batches_len' in inputs else None kr = config.kr_search[stage_i] inputs['neighbors'][stage_i] = TF_OPS.tf_fix_search(points, points, kr, config.search, batches_len, batches_len, name=config.search) # downsampling - pool if stage_n == 'down' and inputs['sample_idx']['down'][stage_i - 1] is None and config.search in TF_OPS.fix_search: stage_last = stage_i - 1 # last downsampled stage queries, supports = inputs['points'][stage_i], stage_list['down'][stage_last]['p_out'] queries_len = supports_len = None if 'batches_len' in inputs: queries_len, supports_len = inputs['batches_len'][stage_i], inputs['batches_len'][stage_last] kr = config.kr_sample[stage_last] inputs['sample_idx']['down'][stage_last] = TF_OPS.tf_fix_search(queries, supports, kr, config.search, queries_len, supports_len, name=f'{config.search}_down') # upsampling - unpool elif stage_n == 'up' and inputs['sample_idx']['up'][stage_i + 1] is None and config.search in TF_OPS.fix_search: stage_last = stage_i + 1 - config.num_layers # last upsampled stage # stage_last = [i for i, stage_d in enumerate(stage_list['up']) if stage_d['p_out'] is not None] # stage_last = stage_last[0] if stage_last else -1 queries = stage_list['down'][stage_i]['p_out'] supports = stage_list['up'][stage_last]['p_out'] supports = supports if supports is not None else stage_list['down'][-1]['p_out'] # or, the most downsampled queries_len = supports_len = None if 'batches_len' in inputs: queries_len, supports_len = inputs['batches_len'][stage_i], inputs['batches_len'][stage_last] kr = config.kr_sample_up[stage_last] inputs['sample_idx']['up'][stage_last] = TF_OPS.tf_fix_search(queries, supports, kr, config.search, 
queries_len, supports_len, name=f'{config.search}_up') # if self.config.debug: # print_dict(inputs, head=f'{stage_n}-{stage_i} - prepared', except_k='stage_list') # print('-' * 60) return @tf_scope def build_head(self, head_list, verbose=True): # building ouput heads & losses head_dict = self.inputs['head_dict'] if 'head_dict' in self.inputs else {'loss': {}, 'result': {}, 'config': {}} head_list = head_list if isinstance(head_list, (tuple, list)) else [head_list] head_list = [load_config(dataset_name='head', cfg_name=h) if isinstance(h, str) else h for h in head_list] if verbose: print('\n\n==== arch output') for head_cfg in head_list: if verbose: log_config(head_cfg) # if self.config.debug: # print_dict(self.inputs) with tf.variable_scope(f'output/{head_cfg.head_n}'): head_rst = apply_head_ops(self.inputs, head_cfg, self.config, self.is_training) if verbose: print_dict(head_rst) # loss head_k = head_cfg.task if head_cfg.task else head_cfg.head_n # head for specified task, or head_n as key by default loss_keys = ['loss',] for k in loss_keys: head_rst_d = head_rst[k] if isinstance(head_rst[k], dict) else {head_k: head_rst[k]} # use returned dict if provided joint = head_dict[k].keys() & head_rst_d.keys() assert len(joint) == 0, f'head rst {k} has overlapping keys {joint}' head_dict[k].update(head_rst_d) # result rst_keys = ['logits', 'probs', 'labels',] head_rst_d = {k: head_rst[k] for k in head_rst if k not in loss_keys} assert head_cfg.head_n not in head_dict['result'], f'duplicate head {head_cfg.head_n} in dict' assert set(head_rst_d.keys()).issuperset(set(rst_keys)), f'must include keys {rst_keys}, but given {head_rst_d.keys()}' head_dict['result'][head_cfg.head_n] = head_rst_d if head_k and head_k != head_cfg.head_n: # get the task head - flat & overridable if head_k in head_dict['result']: warnings.warn(f'duplicate task head {head_k} in dict, override by {head_cfg.head_n}') head_dict['result'][head_k] = {k: head_rst_d[k][head_k] if isinstance(head_rst_d[k], dict) else head_rst_d[k] for k in head_rst_d} # config head_dict['config'][head_cfg.head_n] = head_cfg head_dict['config'][head_k] = head_cfg if verbose: print('\n\n') return head_dict @tf_scope def build_loss(self, scope=None, head_dict=None): # finalizing loss_dict if head_dict is None: head_dict = self.head_dict loss_dict = head_dict['loss'] sum_fn = tf.accumulate_n if len(self.config.gpu_devices) else tf.add_n # accumulate_n seems not working with cpu-only # get the collection, filtering by 'scope' l2_loss = tf.get_collection('weight_losses', scope) if l2_loss and self.config.optimizer not in ['adamW']: loss_dict['l2_loss'] = sum_fn(l2_loss, name='l2_loss') # L2 # sum total loss loss = sum_fn(list(loss_dict.values()), name='loss') # reconstruct loss dict - reorder & incldue total loss main_n = {'seg': ['S3DIS', 'ScanNet', 'Semantic3D', 'NPM3D', 'ShapeNet', 'PartNet', 'SensatUrban', 'SemanticKITTI']} main_n = {v: k for k, lst in main_n.items() for v in lst}[self.config.dataset] loss_dict = { 'loss': loss, # # should have one and only one 'main' loss # # TODO: may introduce cls & seg head at the same time? => each task a main? 
# main_n: loss_dict.pop(main_n), **loss_dict, } head_dict['loss'] = loss_dict return loss_dict class SceneSegModel(Model): def __init__(self, flat_inputs, is_training, config, scope=None, verbose=True): self.config = config self.is_training = is_training self.scope = scope self.verbose = verbose with tf.variable_scope('inputs'): self.inputs = self.get_inputs(flat_inputs) self.num_layers = config.num_layers self.labels = self.inputs['point_labels'] self.down_list = [{'p_sample': None, 'f_sample': None, 'p_out': None, 'f_out': None} for i in range(self.num_layers)] self.up_list = [{'p_sample': None, 'f_sample': None, 'p_out': None, 'f_out': None} for i in range(self.num_layers)] self.stage_list = self.inputs['stage_list'] = {'down': self.down_list, 'up': self.up_list} self.head_dict = self.inputs['head_dict'] = {'loss': {}, 'result': {}, 'config': {}} for i, p in enumerate(self.inputs['points']): # fill points self.down_list[i]['p_out'] = p # up 0 = the most upsampled, num_layers-1 the upsampled pt from the most downsampled self.up_list[i]['p_out'] = p if i < self.num_layers - 1 else None if config.dense_by_conv: dense_layer.config = config with tf.variable_scope('model'): fdim = config.first_features_dim r = config.first_subsampling_dl * config.density_parameter features = self.inputs['features'] F = resnet_backbone(config, self.inputs, features, base_radius=r, base_fdim=fdim, bottleneck_ratio=config.bottleneck_ratio, depth=config.depth, is_training=is_training, init=config.init, weight_decay=config.weight_decay, activation_fn=config.activation_fn, bn=True, bn_momentum=config.bn_momentum, bn_eps=config.bn_eps) F_up, head = resnet_scene_segmentation_head(config, self.inputs, F, base_fdim=fdim, is_training=is_training, init=config.init, weight_decay=config.weight_decay, activation_fn=config.activation_fn, bn=True, bn_momentum=config.bn_momentum, bn_eps=config.bn_eps) for i, p in enumerate(self.inputs['points']): # fill features self.down_list[i]['f_out'] = F[i] # F_up reversed - 0 = the most upsampled, num_layers-1 the upsampled pt from the most downsampled self.up_list[i]['f_out'] = F_up[i] if i < len(F_up) else None self.up_list[-1] = self.down_list[-1] # align the most-downsampled layer if head is not None: latent, logits = head self.up_list[0]['latent'] = latent self.up_list[0]['logits'] = logits self.head_dict = self.build_head(self.config.arch_out, verbose=verbose) self.loss_dict = self.build_loss(scope) return class ModelBuilder(Model): def __init__(self, flat_inputs, is_training, config, scope=None, verbose=True): self.config = config self.is_training = is_training self.scope = scope # variable scope - potential sharing across devices (e.g. gpus) self.verbose = verbose with tf.variable_scope('inputs'): self.inputs = self.get_inputs(flat_inputs) self.num_layers = config.num_layers self.labels = self.inputs['point_labels'] with tf.variable_scope('model'): self.head_dict = self.build_model_plain_split() self.loss_dict = self.build_loss(scope=scope) return def build_model_plain_split(self): """ detect down-/up-sample via ops => architecture = [ops, ...] 
""" config = self.config self.down_list = [{'p_sample': None, 'f_sample': None, 'p_out': None, 'f_out': None} for i in range(self.num_layers)] self.up_list = [{'p_sample': None, 'f_sample': None, 'p_out': None, 'f_out': None} for i in range(self.num_layers)] if self.num_layers > 0 else self.down_list self.stage_list = {'down': self.down_list, 'up': self.up_list} self.head_dict = {'loss': {}, 'result': {}, 'config': {}} inputs = self.inputs inputs['stage_list'] = self.stage_list inputs['head_dict'] = self.head_dict # split arch: input -> main -> output if '__input__' in config.architecture and '__output__' in config.architecture: arch_in = config.architecture[:config.architecture.index('__input__')] arch_main = config.architecture[len(arch_in) + 1:config.architecture.index('__output__')] arch_out = config.architecture[config.architecture.index('__output__') + 1:] else: arch_in = config.arch_in arch_main = config.arch_main arch_out = config.arch_out assert len(arch_in) and len(arch_out), f'invalid split of architecture {config.architecture}' arch_in = [get_block_cfg(blk) if isinstance(blk, str) else blk for blk in arch_in] arch_main = [get_block_cfg(blk) if isinstance(blk, str) else blk for blk in arch_main] arch_out = [load_config(dataset_name='head', cfg_name=a) for a in arch_out] # arch input features = inputs['features'] self.prepare_points('', 0, inputs, config) arch_in_dims = config.arch_in_dims if config.arch_in_dims else [config.first_features_dim] * len(arch_in) if self.verbose: print(f'\n\n==== inputs') print_dict(inputs, prefix='\t', except_k=['stage_list']) print('\n\n==== arch input') for block_i, (block_cfg, d_out) in enumerate(zip(arch_in, arch_in_dims)): with tf.variable_scope(f'input/{block_cfg.name}_{block_i}'):
next_line: features = apply_block_ops(features, d_out, inputs, '', 0, block_cfg, config, self.is_training)
gold_snippet_index: 9
created_at: 2023-10-13 08:03:07+00:00
level: 16k
repo_name: YingqingHe/ScaleCrafter-ptl
file_path: scripts/txt2img.py
[ { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", device=torch.device(\"cuda\"), **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n self.device = device\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != self.device:\n attr = attr.to(self.device)\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule,\n **kwargs\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None, **kwargs):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n timestep_index=i,\n **kwargs)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None, \n # redilation\n dilate=None, dilate_tau=None, dilate_skip=None, \n progress_dilate=False,\n dilate_cfg=None, dilate_cfg_skip=None,\n timestep_index=None,\n **kwargs):\n b, *_, device = *x.shape, x.device\n \n # redilation\n enable_dilate = (dilate is not None)\n if enable_dilate:\n if (self.ddim_timesteps.shape[0]-index) > dilate_tau:\n # close dilation in later denoising\n enable_dilate = False\n else:\n if progress_dilate:\n # adjust the dilation factor progressively\n assert(timestep_index is not None)\n dilate_list = list(range(2, math.ceil(dilate)+1))[::-1]\n n_stage = len(dilate_list)\n n_times_stage = math.ceil(dilate_tau / n_stage)\n stage_index = (timestep_index+1) // n_times_stage\n if stage_index > n_stage-1:\n stage_index = n_stage-1\n dilate = dilate_list[stage_index]\n make_dilate_model(self.model, enable_dilate=enable_dilate, dilate=dilate, nskip=dilate_skip)\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if 
use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = 
self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec" }, { "identifier": "PLMSSampler", "path": "ldm/models/diffusion/plms.py", "snippet": "class PLMSSampler(object):\n def __init__(self, model, schedule=\"linear\", device=torch.device(\"cuda\"), **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n self.device = device\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != self.device:\n attr = attr.to(self.device)\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n if ddim_eta != 0:\n raise ValueError('ddim_eta must be 0 for PLMS')\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. 
- ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n dynamic_threshold=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for PLMS sampling is {size}')\n\n samples, intermediates = self.plms_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def plms_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running PLMS Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps)\n old_eps = []\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: 
deterministic forward pass?\n img = img_orig * mask + (1. - mask) * img\n\n outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n old_eps=old_eps, t_next=ts_next,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0, e_t = outs\n old_eps.append(e_t)\n if len(old_eps) >= 4:\n old_eps.pop(0)\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n def get_model_output(x, t):\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n return e_t\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n\n def get_x_prev_and_pred_x0(e_t, index):\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n if dynamic_threshold is not None:\n pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n e_t = get_model_output(x, t)\n if len(old_eps) == 0:\n # Pseudo Improved Euler (2nd order)\n x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)\n e_t_next = get_model_output(x_prev, t_next)\n e_t_prime = (e_t + e_t_next) / 2\n elif len(old_eps) == 1:\n # 2nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (3 * e_t - old_eps[-1]) / 2\n elif len(old_eps) == 2:\n # 3nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12\n elif len(old_eps) >= 3:\n # 4nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24\n\n x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)\n\n return x_prev, pred_x0, e_t" }, { "identifier": "DPMSolverSampler", "path": "ldm/models/diffusion/dpm_solver/sampler.py", "snippet": "class DPMSolverSampler(object):\n def __init__(self, model, device=torch.device(\"cuda\"), **kwargs):\n super().__init__()\n self.model = model\n self.device = device\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device)\n self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod))\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != self.device:\n attr = attr.to(self.device)\n setattr(self, name, attr)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n if isinstance(ctmp, torch.Tensor):\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {ctmp.shape[0]} conditionings but batch-size is {batch_size}\")\n else:\n if isinstance(conditioning, torch.Tensor):\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n\n print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}')\n\n device = self.model.betas.device\n if x_T is None:\n img = torch.randn(size, device=device)\n else:\n img = x_T\n\n ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod)\n\n model_fn = model_wrapper(\n lambda x, t, c: self.model.apply_model(x, t, c),\n ns,\n model_type=MODEL_TYPES[self.model.parameterization],\n guidance_type=\"classifier-free\",\n condition=conditioning,\n unconditional_condition=unconditional_conditioning,\n guidance_scale=unconditional_guidance_scale,\n )\n\n dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True, thresholding=False)\n x = dpm_solver.sample(img, steps=S, skip_type=\"time_uniform\", method=\"multistep\", order=2,\n lower_order_final=True)\n\n return x.to(device), None" }, { "identifier": "tiled_vae_decoding", "path": "tiled_decode.py", "snippet": "def tiled_vae_decoding(model, x, window_size, overlap, sync_gn=False):\n \"\"\"\n Args:\n x: latent for decoding\n window_size: (h, w) of tile shape \n overlap: overlapped length between tiles\n sync_gn: sync GN between tiles\n \"\"\"\n assert(overlap % 2 == 0)\n B, C, H, W = x.shape\n h, w = window_size, window_size\n\n if overlap == 0:\n # no overlapped tiling\n if sync_gn:\n x = window_partition(x.permute(0,2,3,1), window_size=window_size).permute(0,3,1,2)\n tiles = [x_.unsqueeze(0) for x_ in x]\n tiles = model.decode_first_stage_tiles(tiles)\n x = torch.cat(tiles, dim=0)\n else:\n x = window_partition(x.permute(0,2,3,1), window_size=window_size).permute(0,3,1,2)\n x = model.decode_first_stage(x)\n return window_reverse(x.permute(0,2,3,1), window_size*8, H*8,W*8).permute(0,3,1,2)\n \n # overlapped tiling\n stride = h-overlap\n n_slices= math.ceil((H - h)/(h-overlap)) + 1\n\n if sync_gn:\n tiles = []\n for i in range(n_slices):\n for j in range(n_slices):\n tiles.append(x[:, :, i*stride:i*stride+h, j*stride:j*stride+h])\n tiles = model.decode_first_stage_tiles(tiles)\n \n outs = []\n for i in range(n_slices):\n for j in range(n_slices):\n tile = remove_overlap(tiles[i*n_slices+j], n_slices, overlap, i, j, h, w)\n outs.append(tile)\n else:\n outs = []\n for i in range(n_slices):\n for j in range(n_slices):\n out = x[:, :, i*stride:i*stride+h, j*stride:j*stride+h]\n out = model.decode_first_stage(out)\n tile = remove_overlap(out, n_slices, overlap, i, j, h, w)\n outs.append(tile)\n # merge tiles\n rows=[]\n for i in range(n_slices):\n rows.append(torch.cat(outs[i*n_slices:(i+1)*n_slices], dim=3))\n outs = torch.cat(rows, dim=2)\n return outs" } ]
import argparse, os, sys import cv2 import torch import numpy as np import intel_extension_for_pytorch as ipex from omegaconf import OmegaConf from PIL import Image from tqdm import tqdm, trange from itertools import islice from einops import rearrange from torchvision.utils import make_grid from pytorch_lightning import seed_everything from torch import autocast from contextlib import nullcontext from imwatermark import WatermarkEncoder from ldm.util import instantiate_from_config from ldm.models.diffusion.ddim import DDIMSampler from ldm.models.diffusion.plms import PLMSSampler from ldm.models.diffusion.dpm_solver import DPMSolverSampler from tiled_decode import tiled_vae_decoding
11428
print(f"reading prompts from {opt.from_file}") with open(opt.from_file, "r") as f: data = f.read().splitlines() data = [p for p in data for i in range(opt.repeat)] data = list(chunk(data, batch_size)) sample_path = os.path.join(outpath, "samples") os.makedirs(sample_path, exist_ok=True) sample_count = 0 base_count = len(os.listdir(sample_path)) grid_count = len(os.listdir(outpath)) - 1 start_code = None if opt.fixed_code: start_code = torch.randn([opt.n_samples, opt.C, opt.H // opt.f, opt.W // opt.f], device=device) if opt.torchscript or opt.ipex: transformer = model.cond_stage_model.model unet = model.model.diffusion_model decoder = model.first_stage_model.decoder additional_context = torch.cpu.amp.autocast() if opt.bf16 else nullcontext() shape = [opt.C, opt.H // opt.f, opt.W // opt.f] if opt.bf16 and not opt.torchscript and not opt.ipex: raise ValueError('Bfloat16 is supported only for torchscript+ipex') if opt.bf16 and unet.dtype != torch.bfloat16: raise ValueError("Use configs/stable-diffusion/intel/ configs with bf16 enabled if " + "you'd like to use bfloat16 with CPU.") if unet.dtype == torch.float16 and device == torch.device("cpu"): raise ValueError("Use configs/stable-diffusion/intel/ configs for your model if you'd like to run it on CPU.") if opt.ipex: bf16_dtype = torch.bfloat16 if opt.bf16 else None transformer = transformer.to(memory_format=torch.channels_last) transformer = ipex.optimize(transformer, level="O1", inplace=True) unet = unet.to(memory_format=torch.channels_last) unet = ipex.optimize(unet, level="O1", auto_kernel_selection=True, inplace=True, dtype=bf16_dtype) decoder = decoder.to(memory_format=torch.channels_last) decoder = ipex.optimize(decoder, level="O1", auto_kernel_selection=True, inplace=True, dtype=bf16_dtype) if opt.torchscript: with torch.no_grad(), additional_context: # get UNET scripted if unet.use_checkpoint: raise ValueError("Gradient checkpoint won't work with tracing. 
" + "Use configs/stable-diffusion/intel/ configs for your model or disable checkpoint in your config.") img_in = torch.ones(2, 4, 96, 96, dtype=torch.float32) t_in = torch.ones(2, dtype=torch.int64) context = torch.ones(2, 77, 1024, dtype=torch.float32) scripted_unet = torch.jit.trace(unet, (img_in, t_in, context)) scripted_unet = torch.jit.optimize_for_inference(scripted_unet) print(type(scripted_unet)) model.model.scripted_diffusion_model = scripted_unet # get Decoder for first stage model scripted samples_ddim = torch.ones(1, 4, 96, 96, dtype=torch.float32) scripted_decoder = torch.jit.trace(decoder, (samples_ddim)) scripted_decoder = torch.jit.optimize_for_inference(scripted_decoder) print(type(scripted_decoder)) model.first_stage_model.decoder = scripted_decoder prompts = data[0] print("Running a forward pass to initialize optimizations") uc = None if opt.scale != 1.0: uc = model.get_learned_conditioning(batch_size * [""]) if isinstance(prompts, tuple): prompts = list(prompts) with torch.no_grad(), additional_context: for _ in range(3): c = model.get_learned_conditioning(prompts) samples_ddim, _ = sampler.sample(S=5, conditioning=c, batch_size=batch_size, shape=shape, verbose=False, unconditional_guidance_scale=opt.scale, unconditional_conditioning=uc, eta=opt.ddim_eta, x_T=start_code) print("Running a forward pass for decoder") for _ in range(3): x_samples_ddim = model.decode_first_stage(samples_ddim) precision_scope = autocast if opt.precision=="autocast" or opt.bf16 else nullcontext with torch.no_grad(), \ precision_scope(opt.device), \ model.ema_scope(): all_samples = list() for n in trange(opt.n_iter, desc="Sampling"): for prompts in tqdm(data, desc="data"): uc = None if opt.scale != 1.0: uc = model.get_learned_conditioning(batch_size * [""]) if isinstance(prompts, tuple): prompts = list(prompts) c = model.get_learned_conditioning(prompts) shape = [opt.C, opt.H // opt.f, opt.W // opt.f] samples, _ = sampler.sample(S=opt.steps, conditioning=c, batch_size=opt.n_samples, shape=shape, verbose=False, unconditional_guidance_scale=opt.scale, unconditional_conditioning=uc, eta=opt.ddim_eta, x_T=start_code, # redilation dilate=opt.dilate, dilate_tau=opt.dilate_tau, dilate_skip=opt.dilate_skip, progressive_dilate=opt.progressive_dilate ) if opt.tiled_decoding: bb,cc,hh,ww = samples.shape
sys.path.insert(0, os.getcwd()) torch.set_grad_enabled(False) def chunk(it, size): it = iter(it) return iter(lambda: tuple(islice(it, size)), ()) def load_model_from_config(config, ckpt, device=torch.device("cuda"), verbose=False): print(f"Loading model from {ckpt}") pl_sd = torch.load(ckpt, map_location="cpu") if "global_step" in pl_sd: print(f"Global Step: {pl_sd['global_step']}") sd = pl_sd["state_dict"] model = instantiate_from_config(config.model) m, u = model.load_state_dict(sd, strict=False) if len(m) > 0 and verbose: print("missing keys:") print(m) if len(u) > 0 and verbose: print("unexpected keys:") print(u) if device == torch.device("cuda"): model.cuda() elif device == torch.device("cpu"): model.cpu() model.cond_stage_model.device = "cpu" else: raise ValueError(f"Incorrect device name. Received: {device}") model.eval() return model def parse_args(): parser = argparse.ArgumentParser() parser.add_argument( "--prompt", type=str, nargs="?", default="a professional photograph of an astronaut riding a triceratops", help="the prompt to render" ) parser.add_argument( "--outdir", type=str, nargs="?", help="dir to write results to", default="outputs/txt2img-samples" ) parser.add_argument( "--steps", type=int, default=50, help="number of ddim sampling steps", ) parser.add_argument( "--plms", action='store_true', help="use plms sampling", ) parser.add_argument( "--dpm", action='store_true', help="use DPM (2) sampler", ) parser.add_argument( "--fixed_code", action='store_true', help="if enabled, uses the same starting code across all samples ", ) parser.add_argument( "--ddim_eta", type=float, default=0.0, help="ddim eta (eta=0.0 corresponds to deterministic sampling", ) parser.add_argument( "--n_iter", type=int, default=3, help="sample this often", ) parser.add_argument( "--H", type=int, default=512, help="image height, in pixel space", ) parser.add_argument( "--W", type=int, default=512, help="image width, in pixel space", ) parser.add_argument( "--C", type=int, default=4, help="latent channels", ) parser.add_argument( "--f", type=int, default=8, help="downsampling factor, most often 8 or 16", ) parser.add_argument( "--n_samples", type=int, default=3, help="how many samples to produce for each given prompt. 
A.k.a batch size", ) parser.add_argument( "--n_rows", type=int, default=0, help="rows in the grid (default: n_samples)", ) parser.add_argument( "--scale", type=float, default=9.0, help="unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))", ) parser.add_argument( "--from-file", type=str, help="if specified, load prompts from this file, separated by newlines", ) parser.add_argument( "--config", type=str, default="configs/stable-diffusion/v2-inference.yaml", help="path to config which constructs model", ) parser.add_argument( "--ckpt", type=str, help="path to checkpoint of model", ) parser.add_argument( "--seed", type=int, default=42, help="the seed (for reproducible sampling)", ) parser.add_argument( "--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast" ) parser.add_argument( "--repeat", type=int, default=1, help="repeat each prompt in file this often", ) parser.add_argument( "--device", type=str, help="Device on which Stable Diffusion will be run", choices=["cpu", "cuda"], default="cpu" ) parser.add_argument( "--torchscript", action='store_true', help="Use TorchScript", ) parser.add_argument( "--ipex", action='store_true', help="Use Intel® Extension for PyTorch*", ) parser.add_argument( "--bf16", action='store_true', help="Use bfloat16", ) # redilation parser.add_argument( "--dilate", type=int, default=None, help="redilation factor", ) parser.add_argument( "--dilate_tau", type=int, default=None, help="timestep control, larger means more dilations", ) parser.add_argument( "--dilate_skip", type=int, default=None, help="layer control, larger means less dilations", ) parser.add_argument( "--progressive_dilate", action='store_true', help="Use progressive dilate", ) parser.add_argument( "--tiled_decoding", action='store_true', help="Use progressive dilate", ) parser.add_argument( "--overlap", type=int, default=24, help="length of overlapped regions", ) parser.add_argument( "--sync_gn", action='store_true', help="Use sync_gn", ) opt = parser.parse_args() return opt def put_watermark(img, wm_encoder=None): if wm_encoder is not None: img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR) img = wm_encoder.encode(img, 'dwtDct') img = Image.fromarray(img[:, :, ::-1]) return img def main(opt): seed_everything(opt.seed) config = OmegaConf.load(f"{opt.config}") device = torch.device("cuda") if opt.device == "cuda" else torch.device("cpu") if opt.tiled_decoding: config.model.params.first_stage_config.params.tiled = True if opt.sync_gn: config.model.params.first_stage_config.params.ddconfig.sync_gn = True model = load_model_from_config(config, f"{opt.ckpt}", device) if opt.plms: sampler = PLMSSampler(model, device=device) elif opt.dpm: sampler = DPMSolverSampler(model, device=device) else: sampler = DDIMSampler(model, device=device) os.makedirs(opt.outdir, exist_ok=True) outpath = opt.outdir print("Creating invisible watermark encoder (see https://github.com/ShieldMnt/invisible-watermark)...") wm = "SDV2" wm_encoder = WatermarkEncoder() wm_encoder.set_watermark('bytes', wm.encode('utf-8')) batch_size = opt.n_samples n_rows = opt.n_rows if opt.n_rows > 0 else batch_size if not opt.from_file: prompt = opt.prompt assert prompt is not None data = [batch_size * [prompt]] else: print(f"reading prompts from {opt.from_file}") with open(opt.from_file, "r") as f: data = f.read().splitlines() data = [p for p in data for i in range(opt.repeat)] data = list(chunk(data, batch_size)) sample_path = os.path.join(outpath, "samples") 
os.makedirs(sample_path, exist_ok=True) sample_count = 0 base_count = len(os.listdir(sample_path)) grid_count = len(os.listdir(outpath)) - 1 start_code = None if opt.fixed_code: start_code = torch.randn([opt.n_samples, opt.C, opt.H // opt.f, opt.W // opt.f], device=device) if opt.torchscript or opt.ipex: transformer = model.cond_stage_model.model unet = model.model.diffusion_model decoder = model.first_stage_model.decoder additional_context = torch.cpu.amp.autocast() if opt.bf16 else nullcontext() shape = [opt.C, opt.H // opt.f, opt.W // opt.f] if opt.bf16 and not opt.torchscript and not opt.ipex: raise ValueError('Bfloat16 is supported only for torchscript+ipex') if opt.bf16 and unet.dtype != torch.bfloat16: raise ValueError("Use configs/stable-diffusion/intel/ configs with bf16 enabled if " + "you'd like to use bfloat16 with CPU.") if unet.dtype == torch.float16 and device == torch.device("cpu"): raise ValueError("Use configs/stable-diffusion/intel/ configs for your model if you'd like to run it on CPU.") if opt.ipex: bf16_dtype = torch.bfloat16 if opt.bf16 else None transformer = transformer.to(memory_format=torch.channels_last) transformer = ipex.optimize(transformer, level="O1", inplace=True) unet = unet.to(memory_format=torch.channels_last) unet = ipex.optimize(unet, level="O1", auto_kernel_selection=True, inplace=True, dtype=bf16_dtype) decoder = decoder.to(memory_format=torch.channels_last) decoder = ipex.optimize(decoder, level="O1", auto_kernel_selection=True, inplace=True, dtype=bf16_dtype) if opt.torchscript: with torch.no_grad(), additional_context: # get UNET scripted if unet.use_checkpoint: raise ValueError("Gradient checkpoint won't work with tracing. " + "Use configs/stable-diffusion/intel/ configs for your model or disable checkpoint in your config.") img_in = torch.ones(2, 4, 96, 96, dtype=torch.float32) t_in = torch.ones(2, dtype=torch.int64) context = torch.ones(2, 77, 1024, dtype=torch.float32) scripted_unet = torch.jit.trace(unet, (img_in, t_in, context)) scripted_unet = torch.jit.optimize_for_inference(scripted_unet) print(type(scripted_unet)) model.model.scripted_diffusion_model = scripted_unet # get Decoder for first stage model scripted samples_ddim = torch.ones(1, 4, 96, 96, dtype=torch.float32) scripted_decoder = torch.jit.trace(decoder, (samples_ddim)) scripted_decoder = torch.jit.optimize_for_inference(scripted_decoder) print(type(scripted_decoder)) model.first_stage_model.decoder = scripted_decoder prompts = data[0] print("Running a forward pass to initialize optimizations") uc = None if opt.scale != 1.0: uc = model.get_learned_conditioning(batch_size * [""]) if isinstance(prompts, tuple): prompts = list(prompts) with torch.no_grad(), additional_context: for _ in range(3): c = model.get_learned_conditioning(prompts) samples_ddim, _ = sampler.sample(S=5, conditioning=c, batch_size=batch_size, shape=shape, verbose=False, unconditional_guidance_scale=opt.scale, unconditional_conditioning=uc, eta=opt.ddim_eta, x_T=start_code) print("Running a forward pass for decoder") for _ in range(3): x_samples_ddim = model.decode_first_stage(samples_ddim) precision_scope = autocast if opt.precision=="autocast" or opt.bf16 else nullcontext with torch.no_grad(), \ precision_scope(opt.device), \ model.ema_scope(): all_samples = list() for n in trange(opt.n_iter, desc="Sampling"): for prompts in tqdm(data, desc="data"): uc = None if opt.scale != 1.0: uc = model.get_learned_conditioning(batch_size * [""]) if isinstance(prompts, tuple): prompts = list(prompts) c = 
model.get_learned_conditioning(prompts) shape = [opt.C, opt.H // opt.f, opt.W // opt.f] samples, _ = sampler.sample(S=opt.steps, conditioning=c, batch_size=opt.n_samples, shape=shape, verbose=False, unconditional_guidance_scale=opt.scale, unconditional_conditioning=uc, eta=opt.ddim_eta, x_T=start_code, # redilation dilate=opt.dilate, dilate_tau=opt.dilate_tau, dilate_skip=opt.dilate_skip, progressive_dilate=opt.progressive_dilate ) if opt.tiled_decoding: bb,cc,hh,ww = samples.shape
x_samples = tiled_vae_decoding(model, samples, window_size=hh//2, overlap=opt.overlap, sync_gn=opt.sync_gn)
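This target line decodes the latent in tiles of half its height. A quick numeric check of the tiling arithmetic used by tiled_vae_decoding (stride = window - overlap, tiles per axis = ceil((H - window) / stride) + 1), assuming a 128x128 latent and the parser's default overlap of 24:

import math

H = 128          # latent side, e.g. a 1024-pixel image with the default f=8
window = H // 2  # 64, mirroring window_size=hh//2 in the line above
overlap = 24     # default of --overlap; must be even per the assert in tiled_decode.py

stride = window - overlap                        # 40
n_slices = math.ceil((H - window) / stride) + 1  # ceil(64 / 40) + 1 = 3
print(stride, n_slices, n_slices ** 2)           # 40 3 9 -> nine overlapping tiles to decode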
4
2023-10-11 10:57:55+00:00
16k
bilibini/Lovely_Image_Downloader
py/Python38/site-packages/charset_normalizer/cd.py
[ { "identifier": "FREQUENCIES", "path": "py/Python38/site-packages/charset_normalizer/constant.py", "snippet": "FREQUENCIES: Dict[str, List[str]] = {\n \"English\": [\n \"e\",\n \"a\",\n \"t\",\n \"i\",\n \"o\",\n \"n\",\n \"s\",\n \"r\",\n \"h\",\n \"l\",\n \"d\",\n \"c\",\n \"u\",\n \"m\",\n \"f\",\n \"p\",\n \"g\",\n \"w\",\n \"y\",\n \"b\",\n \"v\",\n \"k\",\n \"x\",\n \"j\",\n \"z\",\n \"q\",\n ],\n \"English—\": [\n \"e\",\n \"a\",\n \"t\",\n \"i\",\n \"o\",\n \"n\",\n \"s\",\n \"r\",\n \"h\",\n \"l\",\n \"d\",\n \"c\",\n \"m\",\n \"u\",\n \"f\",\n \"p\",\n \"g\",\n \"w\",\n \"b\",\n \"y\",\n \"v\",\n \"k\",\n \"j\",\n \"x\",\n \"z\",\n \"q\",\n ],\n \"German\": [\n \"e\",\n \"n\",\n \"i\",\n \"r\",\n \"s\",\n \"t\",\n \"a\",\n \"d\",\n \"h\",\n \"u\",\n \"l\",\n \"g\",\n \"o\",\n \"c\",\n \"m\",\n \"b\",\n \"f\",\n \"k\",\n \"w\",\n \"z\",\n \"p\",\n \"v\",\n \"ü\",\n \"ä\",\n \"ö\",\n \"j\",\n ],\n \"French\": [\n \"e\",\n \"a\",\n \"s\",\n \"n\",\n \"i\",\n \"t\",\n \"r\",\n \"l\",\n \"u\",\n \"o\",\n \"d\",\n \"c\",\n \"p\",\n \"m\",\n \"é\",\n \"v\",\n \"g\",\n \"f\",\n \"b\",\n \"h\",\n \"q\",\n \"à\",\n \"x\",\n \"è\",\n \"y\",\n \"j\",\n ],\n \"Dutch\": [\n \"e\",\n \"n\",\n \"a\",\n \"i\",\n \"r\",\n \"t\",\n \"o\",\n \"d\",\n \"s\",\n \"l\",\n \"g\",\n \"h\",\n \"v\",\n \"m\",\n \"u\",\n \"k\",\n \"c\",\n \"p\",\n \"b\",\n \"w\",\n \"j\",\n \"z\",\n \"f\",\n \"y\",\n \"x\",\n \"ë\",\n ],\n \"Italian\": [\n \"e\",\n \"i\",\n \"a\",\n \"o\",\n \"n\",\n \"l\",\n \"t\",\n \"r\",\n \"s\",\n \"c\",\n \"d\",\n \"u\",\n \"p\",\n \"m\",\n \"g\",\n \"v\",\n \"f\",\n \"b\",\n \"z\",\n \"h\",\n \"q\",\n \"è\",\n \"à\",\n \"k\",\n \"y\",\n \"ò\",\n ],\n \"Polish\": [\n \"a\",\n \"i\",\n \"o\",\n \"e\",\n \"n\",\n \"r\",\n \"z\",\n \"w\",\n \"s\",\n \"c\",\n \"t\",\n \"k\",\n \"y\",\n \"d\",\n \"p\",\n \"m\",\n \"u\",\n \"l\",\n \"j\",\n \"ł\",\n \"g\",\n \"b\",\n \"h\",\n \"ą\",\n \"ę\",\n \"ó\",\n ],\n \"Spanish\": [\n \"e\",\n \"a\",\n \"o\",\n \"n\",\n \"s\",\n \"r\",\n \"i\",\n \"l\",\n \"d\",\n \"t\",\n \"c\",\n \"u\",\n \"m\",\n \"p\",\n \"b\",\n \"g\",\n \"v\",\n \"f\",\n \"y\",\n \"ó\",\n \"h\",\n \"q\",\n \"í\",\n \"j\",\n \"z\",\n \"á\",\n ],\n \"Russian\": [\n \"о\",\n \"а\",\n \"е\",\n \"и\",\n \"н\",\n \"с\",\n \"т\",\n \"р\",\n \"в\",\n \"л\",\n \"к\",\n \"м\",\n \"д\",\n \"п\",\n \"у\",\n \"г\",\n \"я\",\n \"ы\",\n \"з\",\n \"б\",\n \"й\",\n \"ь\",\n \"ч\",\n \"х\",\n \"ж\",\n \"ц\",\n ],\n # Jap-Kanji\n \"Japanese\": [\n \"人\",\n \"一\",\n \"大\",\n \"亅\",\n \"丁\",\n \"丨\",\n \"竹\",\n \"笑\",\n \"口\",\n \"日\",\n \"今\",\n \"二\",\n \"彳\",\n \"行\",\n \"十\",\n \"土\",\n \"丶\",\n \"寸\",\n \"寺\",\n \"時\",\n \"乙\",\n \"丿\",\n \"乂\",\n \"气\",\n \"気\",\n \"冂\",\n \"巾\",\n \"亠\",\n \"市\",\n \"目\",\n \"儿\",\n \"見\",\n \"八\",\n \"小\",\n \"凵\",\n \"県\",\n \"月\",\n \"彐\",\n \"門\",\n \"間\",\n \"木\",\n \"東\",\n \"山\",\n \"出\",\n \"本\",\n \"中\",\n \"刀\",\n \"分\",\n \"耳\",\n \"又\",\n \"取\",\n \"最\",\n \"言\",\n \"田\",\n \"心\",\n \"思\",\n \"刂\",\n \"前\",\n \"京\",\n \"尹\",\n \"事\",\n \"生\",\n \"厶\",\n \"云\",\n \"会\",\n \"未\",\n \"来\",\n \"白\",\n \"冫\",\n \"楽\",\n \"灬\",\n \"馬\",\n \"尸\",\n \"尺\",\n \"駅\",\n \"明\",\n \"耂\",\n \"者\",\n \"了\",\n \"阝\",\n \"都\",\n \"高\",\n \"卜\",\n \"占\",\n \"厂\",\n \"广\",\n \"店\",\n \"子\",\n \"申\",\n \"奄\",\n \"亻\",\n \"俺\",\n \"上\",\n \"方\",\n \"冖\",\n \"学\",\n \"衣\",\n \"艮\",\n \"食\",\n \"自\",\n ],\n # Jap-Katakana\n \"Japanese—\": [\n \"ー\",\n \"ン\",\n \"ス\",\n \"・\",\n \"ル\",\n \"ト\",\n \"リ\",\n \"イ\",\n \"ア\",\n \"ラ\",\n \"ッ\",\n \"ク\",\n \"ド\",\n \"シ\",\n 
\"レ\",\n \"ジ\",\n \"タ\",\n \"フ\",\n \"ロ\",\n \"カ\",\n \"テ\",\n \"マ\",\n \"ィ\",\n \"グ\",\n \"バ\",\n \"ム\",\n \"プ\",\n \"オ\",\n \"コ\",\n \"デ\",\n \"ニ\",\n \"ウ\",\n \"メ\",\n \"サ\",\n \"ビ\",\n \"ナ\",\n \"ブ\",\n \"ャ\",\n \"エ\",\n \"ュ\",\n \"チ\",\n \"キ\",\n \"ズ\",\n \"ダ\",\n \"パ\",\n \"ミ\",\n \"ェ\",\n \"ョ\",\n \"ハ\",\n \"セ\",\n \"ベ\",\n \"ガ\",\n \"モ\",\n \"ツ\",\n \"ネ\",\n \"ボ\",\n \"ソ\",\n \"ノ\",\n \"ァ\",\n \"ヴ\",\n \"ワ\",\n \"ポ\",\n \"ペ\",\n \"ピ\",\n \"ケ\",\n \"ゴ\",\n \"ギ\",\n \"ザ\",\n \"ホ\",\n \"ゲ\",\n \"ォ\",\n \"ヤ\",\n \"ヒ\",\n \"ユ\",\n \"ヨ\",\n \"ヘ\",\n \"ゼ\",\n \"ヌ\",\n \"ゥ\",\n \"ゾ\",\n \"ヶ\",\n \"ヂ\",\n \"ヲ\",\n \"ヅ\",\n \"ヵ\",\n \"ヱ\",\n \"ヰ\",\n \"ヮ\",\n \"ヽ\",\n \"゠\",\n \"ヾ\",\n \"ヷ\",\n \"ヿ\",\n \"ヸ\",\n \"ヹ\",\n \"ヺ\",\n ],\n # Jap-Hiragana\n \"Japanese——\": [\n \"の\",\n \"に\",\n \"る\",\n \"た\",\n \"と\",\n \"は\",\n \"し\",\n \"い\",\n \"を\",\n \"で\",\n \"て\",\n \"が\",\n \"な\",\n \"れ\",\n \"か\",\n \"ら\",\n \"さ\",\n \"っ\",\n \"り\",\n \"す\",\n \"あ\",\n \"も\",\n \"こ\",\n \"ま\",\n \"う\",\n \"く\",\n \"よ\",\n \"き\",\n \"ん\",\n \"め\",\n \"お\",\n \"け\",\n \"そ\",\n \"つ\",\n \"だ\",\n \"や\",\n \"え\",\n \"ど\",\n \"わ\",\n \"ち\",\n \"み\",\n \"せ\",\n \"じ\",\n \"ば\",\n \"へ\",\n \"び\",\n \"ず\",\n \"ろ\",\n \"ほ\",\n \"げ\",\n \"む\",\n \"べ\",\n \"ひ\",\n \"ょ\",\n \"ゆ\",\n \"ぶ\",\n \"ご\",\n \"ゃ\",\n \"ね\",\n \"ふ\",\n \"ぐ\",\n \"ぎ\",\n \"ぼ\",\n \"ゅ\",\n \"づ\",\n \"ざ\",\n \"ぞ\",\n \"ぬ\",\n \"ぜ\",\n \"ぱ\",\n \"ぽ\",\n \"ぷ\",\n \"ぴ\",\n \"ぃ\",\n \"ぁ\",\n \"ぇ\",\n \"ぺ\",\n \"ゞ\",\n \"ぢ\",\n \"ぉ\",\n \"ぅ\",\n \"ゐ\",\n \"ゝ\",\n \"ゑ\",\n \"゛\",\n \"゜\",\n \"ゎ\",\n \"ゔ\",\n \"゚\",\n \"ゟ\",\n \"゙\",\n \"ゕ\",\n \"ゖ\",\n ],\n \"Portuguese\": [\n \"a\",\n \"e\",\n \"o\",\n \"s\",\n \"i\",\n \"r\",\n \"d\",\n \"n\",\n \"t\",\n \"m\",\n \"u\",\n \"c\",\n \"l\",\n \"p\",\n \"g\",\n \"v\",\n \"b\",\n \"f\",\n \"h\",\n \"ã\",\n \"q\",\n \"é\",\n \"ç\",\n \"á\",\n \"z\",\n \"í\",\n ],\n \"Swedish\": [\n \"e\",\n \"a\",\n \"n\",\n \"r\",\n \"t\",\n \"s\",\n \"i\",\n \"l\",\n \"d\",\n \"o\",\n \"m\",\n \"k\",\n \"g\",\n \"v\",\n \"h\",\n \"f\",\n \"u\",\n \"p\",\n \"ä\",\n \"c\",\n \"b\",\n \"ö\",\n \"å\",\n \"y\",\n \"j\",\n \"x\",\n ],\n \"Chinese\": [\n \"的\",\n \"一\",\n \"是\",\n \"不\",\n \"了\",\n \"在\",\n \"人\",\n \"有\",\n \"我\",\n \"他\",\n \"这\",\n \"个\",\n \"们\",\n \"中\",\n \"来\",\n \"上\",\n \"大\",\n \"为\",\n \"和\",\n \"国\",\n \"地\",\n \"到\",\n \"以\",\n \"说\",\n \"时\",\n \"要\",\n \"就\",\n \"出\",\n \"会\",\n \"可\",\n \"也\",\n \"你\",\n \"对\",\n \"生\",\n \"能\",\n \"而\",\n \"子\",\n \"那\",\n \"得\",\n \"于\",\n \"着\",\n \"下\",\n \"自\",\n \"之\",\n \"年\",\n \"过\",\n \"发\",\n \"后\",\n \"作\",\n \"里\",\n \"用\",\n \"道\",\n \"行\",\n \"所\",\n \"然\",\n \"家\",\n \"种\",\n \"事\",\n \"成\",\n \"方\",\n \"多\",\n \"经\",\n \"么\",\n \"去\",\n \"法\",\n \"学\",\n \"如\",\n \"都\",\n \"同\",\n \"现\",\n \"当\",\n \"没\",\n \"动\",\n \"面\",\n \"起\",\n \"看\",\n \"定\",\n \"天\",\n \"分\",\n \"还\",\n \"进\",\n \"好\",\n \"小\",\n \"部\",\n \"其\",\n \"些\",\n \"主\",\n \"样\",\n \"理\",\n \"心\",\n \"她\",\n \"本\",\n \"前\",\n \"开\",\n \"但\",\n \"因\",\n \"只\",\n \"从\",\n \"想\",\n \"实\",\n ],\n \"Ukrainian\": [\n \"о\",\n \"а\",\n \"н\",\n \"і\",\n \"и\",\n \"р\",\n \"в\",\n \"т\",\n \"е\",\n \"с\",\n \"к\",\n \"л\",\n \"у\",\n \"д\",\n \"м\",\n \"п\",\n \"з\",\n \"я\",\n \"ь\",\n \"б\",\n \"г\",\n \"й\",\n \"ч\",\n \"х\",\n \"ц\",\n \"ї\",\n ],\n \"Norwegian\": [\n \"e\",\n \"r\",\n \"n\",\n \"t\",\n \"a\",\n \"s\",\n \"i\",\n \"o\",\n \"l\",\n \"d\",\n \"g\",\n \"k\",\n \"m\",\n \"v\",\n \"f\",\n \"p\",\n \"u\",\n \"b\",\n \"h\",\n \"å\",\n \"y\",\n \"j\",\n \"ø\",\n \"c\",\n 
\"æ\",\n \"w\",\n ],\n \"Finnish\": [\n \"a\",\n \"i\",\n \"n\",\n \"t\",\n \"e\",\n \"s\",\n \"l\",\n \"o\",\n \"u\",\n \"k\",\n \"ä\",\n \"m\",\n \"r\",\n \"v\",\n \"j\",\n \"h\",\n \"p\",\n \"y\",\n \"d\",\n \"ö\",\n \"g\",\n \"c\",\n \"b\",\n \"f\",\n \"w\",\n \"z\",\n ],\n \"Vietnamese\": [\n \"n\",\n \"h\",\n \"t\",\n \"i\",\n \"c\",\n \"g\",\n \"a\",\n \"o\",\n \"u\",\n \"m\",\n \"l\",\n \"r\",\n \"à\",\n \"đ\",\n \"s\",\n \"e\",\n \"v\",\n \"p\",\n \"b\",\n \"y\",\n \"ư\",\n \"d\",\n \"á\",\n \"k\",\n \"ộ\",\n \"ế\",\n ],\n \"Czech\": [\n \"o\",\n \"e\",\n \"a\",\n \"n\",\n \"t\",\n \"s\",\n \"i\",\n \"l\",\n \"v\",\n \"r\",\n \"k\",\n \"d\",\n \"u\",\n \"m\",\n \"p\",\n \"í\",\n \"c\",\n \"h\",\n \"z\",\n \"á\",\n \"y\",\n \"j\",\n \"b\",\n \"ě\",\n \"é\",\n \"ř\",\n ],\n \"Hungarian\": [\n \"e\",\n \"a\",\n \"t\",\n \"l\",\n \"s\",\n \"n\",\n \"k\",\n \"r\",\n \"i\",\n \"o\",\n \"z\",\n \"á\",\n \"é\",\n \"g\",\n \"m\",\n \"b\",\n \"y\",\n \"v\",\n \"d\",\n \"h\",\n \"u\",\n \"p\",\n \"j\",\n \"ö\",\n \"f\",\n \"c\",\n ],\n \"Korean\": [\n \"이\",\n \"다\",\n \"에\",\n \"의\",\n \"는\",\n \"로\",\n \"하\",\n \"을\",\n \"가\",\n \"고\",\n \"지\",\n \"서\",\n \"한\",\n \"은\",\n \"기\",\n \"으\",\n \"년\",\n \"대\",\n \"사\",\n \"시\",\n \"를\",\n \"리\",\n \"도\",\n \"인\",\n \"스\",\n \"일\",\n ],\n \"Indonesian\": [\n \"a\",\n \"n\",\n \"e\",\n \"i\",\n \"r\",\n \"t\",\n \"u\",\n \"s\",\n \"d\",\n \"k\",\n \"m\",\n \"l\",\n \"g\",\n \"p\",\n \"b\",\n \"o\",\n \"h\",\n \"y\",\n \"j\",\n \"c\",\n \"w\",\n \"f\",\n \"v\",\n \"z\",\n \"x\",\n \"q\",\n ],\n \"Turkish\": [\n \"a\",\n \"e\",\n \"i\",\n \"n\",\n \"r\",\n \"l\",\n \"ı\",\n \"k\",\n \"d\",\n \"t\",\n \"s\",\n \"m\",\n \"y\",\n \"u\",\n \"o\",\n \"b\",\n \"ü\",\n \"ş\",\n \"v\",\n \"g\",\n \"z\",\n \"h\",\n \"c\",\n \"p\",\n \"ç\",\n \"ğ\",\n ],\n \"Romanian\": [\n \"e\",\n \"i\",\n \"a\",\n \"r\",\n \"n\",\n \"t\",\n \"u\",\n \"l\",\n \"o\",\n \"c\",\n \"s\",\n \"d\",\n \"p\",\n \"m\",\n \"ă\",\n \"f\",\n \"v\",\n \"î\",\n \"g\",\n \"b\",\n \"ș\",\n \"ț\",\n \"z\",\n \"h\",\n \"â\",\n \"j\",\n ],\n \"Farsi\": [\n \"ا\",\n \"ی\",\n \"ر\",\n \"د\",\n \"ن\",\n \"ه\",\n \"و\",\n \"م\",\n \"ت\",\n \"ب\",\n \"س\",\n \"ل\",\n \"ک\",\n \"ش\",\n \"ز\",\n \"ف\",\n \"گ\",\n \"ع\",\n \"خ\",\n \"ق\",\n \"ج\",\n \"آ\",\n \"پ\",\n \"ح\",\n \"ط\",\n \"ص\",\n ],\n \"Arabic\": [\n \"ا\",\n \"ل\",\n \"ي\",\n \"م\",\n \"و\",\n \"ن\",\n \"ر\",\n \"ت\",\n \"ب\",\n \"ة\",\n \"ع\",\n \"د\",\n \"س\",\n \"ف\",\n \"ه\",\n \"ك\",\n \"ق\",\n \"أ\",\n \"ح\",\n \"ج\",\n \"ش\",\n \"ط\",\n \"ص\",\n \"ى\",\n \"خ\",\n \"إ\",\n ],\n \"Danish\": [\n \"e\",\n \"r\",\n \"n\",\n \"t\",\n \"a\",\n \"i\",\n \"s\",\n \"d\",\n \"l\",\n \"o\",\n \"g\",\n \"m\",\n \"k\",\n \"f\",\n \"v\",\n \"u\",\n \"b\",\n \"h\",\n \"p\",\n \"å\",\n \"y\",\n \"ø\",\n \"æ\",\n \"c\",\n \"j\",\n \"w\",\n ],\n \"Serbian\": [\n \"а\",\n \"и\",\n \"о\",\n \"е\",\n \"н\",\n \"р\",\n \"с\",\n \"у\",\n \"т\",\n \"к\",\n \"ј\",\n \"в\",\n \"д\",\n \"м\",\n \"п\",\n \"л\",\n \"г\",\n \"з\",\n \"б\",\n \"a\",\n \"i\",\n \"e\",\n \"o\",\n \"n\",\n \"ц\",\n \"ш\",\n ],\n \"Lithuanian\": [\n \"i\",\n \"a\",\n \"s\",\n \"o\",\n \"r\",\n \"e\",\n \"t\",\n \"n\",\n \"u\",\n \"k\",\n \"m\",\n \"l\",\n \"p\",\n \"v\",\n \"d\",\n \"j\",\n \"g\",\n \"ė\",\n \"b\",\n \"y\",\n \"ų\",\n \"š\",\n \"ž\",\n \"c\",\n \"ą\",\n \"į\",\n ],\n \"Slovene\": [\n \"e\",\n \"a\",\n \"i\",\n \"o\",\n \"n\",\n \"r\",\n \"s\",\n \"l\",\n \"t\",\n \"j\",\n \"v\",\n \"k\",\n \"d\",\n \"p\",\n \"m\",\n \"u\",\n \"z\",\n \"b\",\n \"g\",\n \"h\",\n 
\"č\",\n \"c\",\n \"š\",\n \"ž\",\n \"f\",\n \"y\",\n ],\n \"Slovak\": [\n \"o\",\n \"a\",\n \"e\",\n \"n\",\n \"i\",\n \"r\",\n \"v\",\n \"t\",\n \"s\",\n \"l\",\n \"k\",\n \"d\",\n \"m\",\n \"p\",\n \"u\",\n \"c\",\n \"h\",\n \"j\",\n \"b\",\n \"z\",\n \"á\",\n \"y\",\n \"ý\",\n \"í\",\n \"č\",\n \"é\",\n ],\n \"Hebrew\": [\n \"י\",\n \"ו\",\n \"ה\",\n \"ל\",\n \"ר\",\n \"ב\",\n \"ת\",\n \"מ\",\n \"א\",\n \"ש\",\n \"נ\",\n \"ע\",\n \"ם\",\n \"ד\",\n \"ק\",\n \"ח\",\n \"פ\",\n \"ס\",\n \"כ\",\n \"ג\",\n \"ט\",\n \"צ\",\n \"ן\",\n \"ז\",\n \"ך\",\n ],\n \"Bulgarian\": [\n \"а\",\n \"и\",\n \"о\",\n \"е\",\n \"н\",\n \"т\",\n \"р\",\n \"с\",\n \"в\",\n \"л\",\n \"к\",\n \"д\",\n \"п\",\n \"м\",\n \"з\",\n \"г\",\n \"я\",\n \"ъ\",\n \"у\",\n \"б\",\n \"ч\",\n \"ц\",\n \"й\",\n \"ж\",\n \"щ\",\n \"х\",\n ],\n \"Croatian\": [\n \"a\",\n \"i\",\n \"o\",\n \"e\",\n \"n\",\n \"r\",\n \"j\",\n \"s\",\n \"t\",\n \"u\",\n \"k\",\n \"l\",\n \"v\",\n \"d\",\n \"m\",\n \"p\",\n \"g\",\n \"z\",\n \"b\",\n \"c\",\n \"č\",\n \"h\",\n \"š\",\n \"ž\",\n \"ć\",\n \"f\",\n ],\n \"Hindi\": [\n \"क\",\n \"र\",\n \"स\",\n \"न\",\n \"त\",\n \"म\",\n \"ह\",\n \"प\",\n \"य\",\n \"ल\",\n \"व\",\n \"ज\",\n \"द\",\n \"ग\",\n \"ब\",\n \"श\",\n \"ट\",\n \"अ\",\n \"ए\",\n \"थ\",\n \"भ\",\n \"ड\",\n \"च\",\n \"ध\",\n \"ष\",\n \"इ\",\n ],\n \"Estonian\": [\n \"a\",\n \"i\",\n \"e\",\n \"s\",\n \"t\",\n \"l\",\n \"u\",\n \"n\",\n \"o\",\n \"k\",\n \"r\",\n \"d\",\n \"m\",\n \"v\",\n \"g\",\n \"p\",\n \"j\",\n \"h\",\n \"ä\",\n \"b\",\n \"õ\",\n \"ü\",\n \"f\",\n \"c\",\n \"ö\",\n \"y\",\n ],\n \"Thai\": [\n \"า\",\n \"น\",\n \"ร\",\n \"อ\",\n \"ก\",\n \"เ\",\n \"ง\",\n \"ม\",\n \"ย\",\n \"ล\",\n \"ว\",\n \"ด\",\n \"ท\",\n \"ส\",\n \"ต\",\n \"ะ\",\n \"ป\",\n \"บ\",\n \"ค\",\n \"ห\",\n \"แ\",\n \"จ\",\n \"พ\",\n \"ช\",\n \"ข\",\n \"ใ\",\n ],\n \"Greek\": [\n \"α\",\n \"τ\",\n \"ο\",\n \"ι\",\n \"ε\",\n \"ν\",\n \"ρ\",\n \"σ\",\n \"κ\",\n \"η\",\n \"π\",\n \"ς\",\n \"υ\",\n \"μ\",\n \"λ\",\n \"ί\",\n \"ό\",\n \"ά\",\n \"γ\",\n \"έ\",\n \"δ\",\n \"ή\",\n \"ω\",\n \"χ\",\n \"θ\",\n \"ύ\",\n ],\n \"Tamil\": [\n \"க\",\n \"த\",\n \"ப\",\n \"ட\",\n \"ர\",\n \"ம\",\n \"ல\",\n \"ன\",\n \"வ\",\n \"ற\",\n \"ய\",\n \"ள\",\n \"ச\",\n \"ந\",\n \"இ\",\n \"ண\",\n \"அ\",\n \"ஆ\",\n \"ழ\",\n \"ங\",\n \"எ\",\n \"உ\",\n \"ஒ\",\n \"ஸ\",\n ],\n \"Kazakh\": [\n \"а\",\n \"ы\",\n \"е\",\n \"н\",\n \"т\",\n \"р\",\n \"л\",\n \"і\",\n \"д\",\n \"с\",\n \"м\",\n \"қ\",\n \"к\",\n \"о\",\n \"б\",\n \"и\",\n \"у\",\n \"ғ\",\n \"ж\",\n \"ң\",\n \"з\",\n \"ш\",\n \"й\",\n \"п\",\n \"г\",\n \"ө\",\n ],\n}" }, { "identifier": "KO_NAMES", "path": "py/Python38/site-packages/charset_normalizer/constant.py", "snippet": "KO_NAMES: Set[str] = {\"johab\", \"cp949\", \"euc_kr\"}" }, { "identifier": "LANGUAGE_SUPPORTED_COUNT", "path": "py/Python38/site-packages/charset_normalizer/constant.py", "snippet": "LANGUAGE_SUPPORTED_COUNT: int = len(FREQUENCIES)" }, { "identifier": "TOO_SMALL_SEQUENCE", "path": "py/Python38/site-packages/charset_normalizer/constant.py", "snippet": "TOO_SMALL_SEQUENCE: int = 32" }, { "identifier": "ZH_NAMES", "path": "py/Python38/site-packages/charset_normalizer/constant.py", "snippet": "ZH_NAMES: Set[str] = {\"big5\", \"cp950\", \"big5hkscs\", \"hz\"}" }, { "identifier": "is_suspiciously_successive_range", "path": "py/Python38/site-packages/charset_normalizer/md.py", "snippet": "@lru_cache(maxsize=1024)\ndef is_suspiciously_successive_range(\n unicode_range_a: Optional[str], unicode_range_b: Optional[str]\n) -> bool:\n \"\"\"\n Determine if 
two Unicode range seen next to each other can be considered as suspicious.\n \"\"\"\n if unicode_range_a is None or unicode_range_b is None:\n return True\n\n if unicode_range_a == unicode_range_b:\n return False\n\n if \"Latin\" in unicode_range_a and \"Latin\" in unicode_range_b:\n return False\n\n if \"Emoticons\" in unicode_range_a or \"Emoticons\" in unicode_range_b:\n return False\n\n # Latin characters can be accompanied with a combining diacritical mark\n # eg. Vietnamese.\n if (\"Latin\" in unicode_range_a or \"Latin\" in unicode_range_b) and (\n \"Combining\" in unicode_range_a or \"Combining\" in unicode_range_b\n ):\n return False\n\n keywords_range_a, keywords_range_b = unicode_range_a.split(\n \" \"\n ), unicode_range_b.split(\" \")\n\n for el in keywords_range_a:\n if el in UNICODE_SECONDARY_RANGE_KEYWORD:\n continue\n if el in keywords_range_b:\n return False\n\n # Japanese Exception\n range_a_jp_chars, range_b_jp_chars = (\n unicode_range_a\n in (\n \"Hiragana\",\n \"Katakana\",\n ),\n unicode_range_b in (\"Hiragana\", \"Katakana\"),\n )\n if (range_a_jp_chars or range_b_jp_chars) and (\n \"CJK\" in unicode_range_a or \"CJK\" in unicode_range_b\n ):\n return False\n if range_a_jp_chars and range_b_jp_chars:\n return False\n\n if \"Hangul\" in unicode_range_a or \"Hangul\" in unicode_range_b:\n if \"CJK\" in unicode_range_a or \"CJK\" in unicode_range_b:\n return False\n if unicode_range_a == \"Basic Latin\" or unicode_range_b == \"Basic Latin\":\n return False\n\n # Chinese/Japanese use dedicated range for punctuation and/or separators.\n if (\"CJK\" in unicode_range_a or \"CJK\" in unicode_range_b) or (\n unicode_range_a in [\"Katakana\", \"Hiragana\"]\n and unicode_range_b in [\"Katakana\", \"Hiragana\"]\n ):\n if \"Punctuation\" in unicode_range_a or \"Punctuation\" in unicode_range_b:\n return False\n if \"Forms\" in unicode_range_a or \"Forms\" in unicode_range_b:\n return False\n\n return True" }, { "identifier": "CoherenceMatches", "path": "py/Python38/site-packages/charset_normalizer/models.py", "snippet": "class CharsetMatch:\nclass CharsetMatches:\nclass CliDetectionResult:\n def __init__(\n self,\n payload: bytes,\n guessed_encoding: str,\n mean_mess_ratio: float,\n has_sig_or_bom: bool,\n languages: \"CoherenceMatches\",\n decoded_payload: Optional[str] = None,\n ):\n def __eq__(self, other: object) -> bool:\n def __lt__(self, other: object) -> bool:\n def multi_byte_usage(self) -> float:\n def __str__(self) -> str:\n def __repr__(self) -> str:\n def add_submatch(self, other: \"CharsetMatch\") -> None:\n def encoding(self) -> str:\n def encoding_aliases(self) -> List[str]:\n def bom(self) -> bool:\n def byte_order_mark(self) -> bool:\n def languages(self) -> List[str]:\n def language(self) -> str:\n def chaos(self) -> float:\n def coherence(self) -> float:\n def percent_chaos(self) -> float:\n def percent_coherence(self) -> float:\n def raw(self) -> bytes:\n def submatch(self) -> List[\"CharsetMatch\"]:\n def has_submatch(self) -> bool:\n def alphabets(self) -> List[str]:\n def could_be_from_charset(self) -> List[str]:\n def output(self, encoding: str = \"utf_8\") -> bytes:\n def fingerprint(self) -> str:\n def __init__(self, results: Optional[List[CharsetMatch]] = None):\n def __iter__(self) -> Iterator[CharsetMatch]:\n def __getitem__(self, item: Union[int, str]) -> CharsetMatch:\n def __len__(self) -> int:\n def __bool__(self) -> bool:\n def append(self, item: CharsetMatch) -> None:\n def best(self) -> Optional[\"CharsetMatch\"]:\n def first(self) -> 
Optional[\"CharsetMatch\"]:\n def __init__(\n self,\n path: str,\n encoding: Optional[str],\n encoding_aliases: List[str],\n alternative_encodings: List[str],\n language: str,\n alphabets: List[str],\n has_sig_or_bom: bool,\n chaos: float,\n coherence: float,\n unicode_path: Optional[str],\n is_preferred: bool,\n ):\n def __dict__(self) -> Dict[str, Any]: # type: ignore\n def to_json(self) -> str:" }, { "identifier": "is_accentuated", "path": "py/Python38/site-packages/charset_normalizer/utils.py", "snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef is_accentuated(character: str) -> bool:\n try:\n description: str = unicodedata.name(character)\n except ValueError:\n return False\n return (\n \"WITH GRAVE\" in description\n or \"WITH ACUTE\" in description\n or \"WITH CEDILLA\" in description\n or \"WITH DIAERESIS\" in description\n or \"WITH CIRCUMFLEX\" in description\n or \"WITH TILDE\" in description\n )" }, { "identifier": "is_latin", "path": "py/Python38/site-packages/charset_normalizer/utils.py", "snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef is_latin(character: str) -> bool:\n try:\n description: str = unicodedata.name(character)\n except ValueError:\n return False\n return \"LATIN\" in description" }, { "identifier": "is_multi_byte_encoding", "path": "py/Python38/site-packages/charset_normalizer/utils.py", "snippet": "@lru_cache(maxsize=128)\ndef is_multi_byte_encoding(name: str) -> bool:\n \"\"\"\n Verify is a specific encoding is a multi byte one based on it IANA name\n \"\"\"\n return name in {\n \"utf_8\",\n \"utf_8_sig\",\n \"utf_16\",\n \"utf_16_be\",\n \"utf_16_le\",\n \"utf_32\",\n \"utf_32_le\",\n \"utf_32_be\",\n \"utf_7\",\n } or issubclass(\n importlib.import_module(\"encodings.{}\".format(name)).IncrementalDecoder,\n MultibyteIncrementalDecoder,\n )" }, { "identifier": "is_unicode_range_secondary", "path": "py/Python38/site-packages/charset_normalizer/utils.py", "snippet": "@lru_cache(maxsize=len(UNICODE_RANGES_COMBINED))\ndef is_unicode_range_secondary(range_name: str) -> bool:\n return any(keyword in range_name for keyword in UNICODE_SECONDARY_RANGE_KEYWORD)" }, { "identifier": "unicode_range", "path": "py/Python38/site-packages/charset_normalizer/utils.py", "snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef unicode_range(character: str) -> Optional[str]:\n \"\"\"\n Retrieve the Unicode range official name from a single character.\n \"\"\"\n character_ord: int = ord(character)\n\n for range_name, ord_range in UNICODE_RANGES_COMBINED.items():\n if character_ord in ord_range:\n return range_name\n\n return None" } ]
import importlib from codecs import IncrementalDecoder from collections import Counter from functools import lru_cache from typing import Counter as TypeCounter, Dict, List, Optional, Tuple from .constant import ( FREQUENCIES, KO_NAMES, LANGUAGE_SUPPORTED_COUNT, TOO_SMALL_SEQUENCE, ZH_NAMES, ) from .md import is_suspiciously_successive_range from .models import CoherenceMatches from .utils import ( is_accentuated, is_latin, is_multi_byte_encoding, is_unicode_range_secondary, unicode_range, )
11252
if target_have_accents is False and source_have_accents: continue character_count: int = len(language_characters) character_match_count: int = len( [c for c in language_characters if c in characters] ) ratio: float = character_match_count / character_count if ratio >= 0.2: languages.append((language, ratio)) languages = sorted(languages, key=lambda x: x[1], reverse=True) return [compatible_language[0] for compatible_language in languages] def characters_popularity_compare( language: str, ordered_characters: List[str] ) -> float: """ Determine if a ordered characters list (by occurrence from most appearance to rarest) match a particular language. The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit). Beware that is function is not strict on the match in order to ease the detection. (Meaning close match is 1.) """ if language not in FREQUENCIES: raise ValueError("{} not available".format(language)) character_approved_count: int = 0 FREQUENCIES_language_set = set(FREQUENCIES[language]) ordered_characters_count: int = len(ordered_characters) target_language_characters_count: int = len(FREQUENCIES[language]) large_alphabet: bool = target_language_characters_count > 26 for character, character_rank in zip( ordered_characters, range(0, ordered_characters_count) ): if character not in FREQUENCIES_language_set: continue character_rank_in_language: int = FREQUENCIES[language].index(character) expected_projection_ratio: float = ( target_language_characters_count / ordered_characters_count ) character_rank_projection: int = int(character_rank * expected_projection_ratio) if ( large_alphabet is False and abs(character_rank_projection - character_rank_in_language) > 4 ): continue if ( large_alphabet is True and abs(character_rank_projection - character_rank_in_language) < target_language_characters_count / 3 ): character_approved_count += 1 continue characters_before_source: List[str] = FREQUENCIES[language][ 0:character_rank_in_language ] characters_after_source: List[str] = FREQUENCIES[language][ character_rank_in_language: ] characters_before: List[str] = ordered_characters[0:character_rank] characters_after: List[str] = ordered_characters[character_rank:] before_match_count: int = len( set(characters_before) & set(characters_before_source) ) after_match_count: int = len( set(characters_after) & set(characters_after_source) ) if len(characters_before_source) == 0 and before_match_count <= 4: character_approved_count += 1 continue if len(characters_after_source) == 0 and after_match_count <= 4: character_approved_count += 1 continue if ( before_match_count / len(characters_before_source) >= 0.4 or after_match_count / len(characters_after_source) >= 0.4 ): character_approved_count += 1 continue return character_approved_count / len(ordered_characters) def alpha_unicode_split(decoded_sequence: str) -> List[str]: """ Given a decoded text sequence, return a list of str. Unicode range / alphabet separation. Ex. a text containing English/Latin with a bit a Hebrew will return two items in the resulting list; One containing the latin letters and the other hebrew. """ layers: Dict[str, str] = {} for character in decoded_sequence: if character.isalpha() is False: continue character_range: Optional[str] = unicode_range(character) if character_range is None: continue layer_target_range: Optional[str] = None for discovered_range in layers: if (
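characters_popularity_compare, shown in the cropped code above, projects each observed character's rank onto the language's frequency table before deciding whether to approve it. A small arithmetic illustration of that projection with made-up numbers (it does not call the real function):

# Hypothetical numbers: 13 observed characters scored against English's 26-letter table.
ordered_characters_count = 13
target_language_characters_count = 26  # len(FREQUENCIES["English"])
expected_projection_ratio = target_language_characters_count / ordered_characters_count  # 2.0

character_rank = 3                # 4th most frequent character in the sample
character_rank_in_language = 8    # its rank in the English table
character_rank_projection = int(character_rank * expected_projection_ratio)  # 6

# English is not a "large alphabet" (26 is not > 26), so the character only stays in play
# when the projected rank lands within 4 places of its true rank; here the gap is 2.
print(abs(character_rank_projection - character_rank_in_language) <= 4)  # True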
def encoding_unicode_range(iana_name: str) -> List[str]: """ Return associated unicode ranges in a single byte code page. """ if is_multi_byte_encoding(iana_name): raise IOError("Function not supported on multi-byte code page") decoder = importlib.import_module( "encodings.{}".format(iana_name) ).IncrementalDecoder p: IncrementalDecoder = decoder(errors="ignore") seen_ranges: Dict[str, int] = {} character_count: int = 0 for i in range(0x40, 0xFF): chunk: str = p.decode(bytes([i])) if chunk: character_range: Optional[str] = unicode_range(chunk) if character_range is None: continue if is_unicode_range_secondary(character_range) is False: if character_range not in seen_ranges: seen_ranges[character_range] = 0 seen_ranges[character_range] += 1 character_count += 1 return sorted( [ character_range for character_range in seen_ranges if seen_ranges[character_range] / character_count >= 0.15 ] ) def unicode_range_languages(primary_range: str) -> List[str]: """ Return inferred languages used with a unicode range. """ languages: List[str] = [] for language, characters in FREQUENCIES.items(): for character in characters: if unicode_range(character) == primary_range: languages.append(language) break return languages @lru_cache() def encoding_languages(iana_name: str) -> List[str]: """ Single-byte encoding language association. Some code page are heavily linked to particular language(s). This function does the correspondence. """ unicode_ranges: List[str] = encoding_unicode_range(iana_name) primary_range: Optional[str] = None for specified_range in unicode_ranges: if "Latin" not in specified_range: primary_range = specified_range break if primary_range is None: return ["Latin Based"] return unicode_range_languages(primary_range) @lru_cache() def mb_encoding_languages(iana_name: str) -> List[str]: """ Multi-byte encoding language association. Some code page are heavily linked to particular language(s). This function does the correspondence. """ if ( iana_name.startswith("shift_") or iana_name.startswith("iso2022_jp") or iana_name.startswith("euc_j") or iana_name == "cp932" ): return ["Japanese"] if iana_name.startswith("gb") or iana_name in ZH_NAMES: return ["Chinese"] if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES: return ["Korean"] return [] @lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT) def get_target_features(language: str) -> Tuple[bool, bool]: """ Determine main aspects from a supported language if it contains accents and if is pure Latin. """ target_have_accents: bool = False target_pure_latin: bool = True for character in FREQUENCIES[language]: if not target_have_accents and is_accentuated(character): target_have_accents = True if target_pure_latin and is_latin(character) is False: target_pure_latin = False return target_have_accents, target_pure_latin def alphabet_languages( characters: List[str], ignore_non_latin: bool = False ) -> List[str]: """ Return associated languages associated to given characters. 
""" languages: List[Tuple[str, float]] = [] source_have_accents = any(is_accentuated(character) for character in characters) for language, language_characters in FREQUENCIES.items(): target_have_accents, target_pure_latin = get_target_features(language) if ignore_non_latin and target_pure_latin is False: continue if target_have_accents is False and source_have_accents: continue character_count: int = len(language_characters) character_match_count: int = len( [c for c in language_characters if c in characters] ) ratio: float = character_match_count / character_count if ratio >= 0.2: languages.append((language, ratio)) languages = sorted(languages, key=lambda x: x[1], reverse=True) return [compatible_language[0] for compatible_language in languages] def characters_popularity_compare( language: str, ordered_characters: List[str] ) -> float: """ Determine if a ordered characters list (by occurrence from most appearance to rarest) match a particular language. The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit). Beware that is function is not strict on the match in order to ease the detection. (Meaning close match is 1.) """ if language not in FREQUENCIES: raise ValueError("{} not available".format(language)) character_approved_count: int = 0 FREQUENCIES_language_set = set(FREQUENCIES[language]) ordered_characters_count: int = len(ordered_characters) target_language_characters_count: int = len(FREQUENCIES[language]) large_alphabet: bool = target_language_characters_count > 26 for character, character_rank in zip( ordered_characters, range(0, ordered_characters_count) ): if character not in FREQUENCIES_language_set: continue character_rank_in_language: int = FREQUENCIES[language].index(character) expected_projection_ratio: float = ( target_language_characters_count / ordered_characters_count ) character_rank_projection: int = int(character_rank * expected_projection_ratio) if ( large_alphabet is False and abs(character_rank_projection - character_rank_in_language) > 4 ): continue if ( large_alphabet is True and abs(character_rank_projection - character_rank_in_language) < target_language_characters_count / 3 ): character_approved_count += 1 continue characters_before_source: List[str] = FREQUENCIES[language][ 0:character_rank_in_language ] characters_after_source: List[str] = FREQUENCIES[language][ character_rank_in_language: ] characters_before: List[str] = ordered_characters[0:character_rank] characters_after: List[str] = ordered_characters[character_rank:] before_match_count: int = len( set(characters_before) & set(characters_before_source) ) after_match_count: int = len( set(characters_after) & set(characters_after_source) ) if len(characters_before_source) == 0 and before_match_count <= 4: character_approved_count += 1 continue if len(characters_after_source) == 0 and after_match_count <= 4: character_approved_count += 1 continue if ( before_match_count / len(characters_before_source) >= 0.4 or after_match_count / len(characters_after_source) >= 0.4 ): character_approved_count += 1 continue return character_approved_count / len(ordered_characters) def alpha_unicode_split(decoded_sequence: str) -> List[str]: """ Given a decoded text sequence, return a list of str. Unicode range / alphabet separation. Ex. a text containing English/Latin with a bit a Hebrew will return two items in the resulting list; One containing the latin letters and the other hebrew. 
""" layers: Dict[str, str] = {} for character in decoded_sequence: if character.isalpha() is False: continue character_range: Optional[str] = unicode_range(character) if character_range is None: continue layer_target_range: Optional[str] = None for discovered_range in layers: if (
is_suspiciously_successive_range(discovered_range, character_range)
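The full cd.py source above also exposes alphabet_languages, which keeps every language whose frequency table covers at least 20% of the supplied characters. A hedged usage sketch, assuming the module is importable as charset_normalizer.cd (the exact ordering of the returned list depends on the FREQUENCIES tables, so only the leading candidates are printed):

from charset_normalizer.cd import alphabet_languages

# A Latin pangram exercises all 26 English letters, so Latin-script languages score highly.
characters = list("the quick brown fox jumps over the lazy dog".replace(" ", ""))
candidates = alphabet_languages(characters)
print(candidates[:5])  # English and other Latin-script languages, best coverage first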
5
2023-10-11 09:08:57+00:00
16k
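For this row's code, mb_encoding_languages resolves multi-byte codec names to likely languages with simple prefix and set checks against KO_NAMES/ZH_NAMES from the context. A standalone sketch mirroring that branch order (constants copied from the snippet rather than imported):

ZH_NAMES = {"big5", "cp950", "big5hkscs", "hz"}
KO_NAMES = {"johab", "cp949", "euc_kr"}

def guess_mb_languages(iana_name: str) -> list:
    # Same branch order as mb_encoding_languages in cd.py.
    if iana_name.startswith(("shift_", "iso2022_jp", "euc_j")) or iana_name == "cp932":
        return ["Japanese"]
    if iana_name.startswith("gb") or iana_name in ZH_NAMES:
        return ["Chinese"]
    if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES:
        return ["Korean"]
    return []

print(guess_mb_languages("shift_jis"), guess_mb_languages("gb18030"), guess_mb_languages("cp949"))
# ['Japanese'] ['Chinese'] ['Korean']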
MTgeophysics/mtpy-v2
mtpy/modeling/occam1d/startup.py
[ { "identifier": "Occam1DData", "path": "mtpy/modeling/occam1d/data.py", "snippet": "class Occam1DData(object):\n \"\"\"\n reads and writes occam 1D data files\n\n ===================== =====================================================\n Attributes Description\n ===================== =====================================================\n _data_fn basename of data file *default* is Occam1DDataFile\n _header_line header line for description of data columns\n _ss string spacing *default* is 6*' '\n _string_fmt format of data *default* is '+.6e'\n data array of data\n data_fn full path to data file\n freq frequency array of data\n mode mode to invert for [ 'TE' | 'TM' | 'det' ]\n phase_te array of TE phase\n phase_tm array of TM phase\n res_te array of TE apparent resistivity\n res_tm array of TM apparent resistivity\n resp_fn full path to response file\n save_path path to save files to\n ===================== =====================================================\n\n\n ===================== =====================================================\n Methods Description\n ===================== =====================================================\n write_data_file write an Occam1D data file\n read_data_file read an Occam1D data file\n read_resp_file read a .resp file output by Occam1D\n ===================== =====================================================\n\n :Example: ::\n\n >>> import mtpy.modeling.occam1d as occam1d\n >>> #--> make a data file for TE mode\n >>> d1 = occam1d.Data()\n >>> d1.write_data_file(edi_file=r'/home/MT/mt01.edi', res_err=10, phase_err=2.5,\n >>> ... save_path=r\"/home/occam1d/mt01/TE\", mode='TE')\n\n \"\"\"\n\n def __init__(self, mt_dataframe, **kwargs):\n self.logger = logger\n self.mt_dataframe = MTDataFrame(data=mt_dataframe)\n\n self._string_fmt = \"+.6e\"\n self._ss = 6 * \" \"\n self._acceptable_modes = [\"te\" \"tm\", \"det\", \"detz\", \"tez\", \"tmz\"]\n self._data_fn = \"Occam1d_DataFile\"\n self._header_line = \"!{0}\\n\".format(\n \" \".join([\"Type\", \"Freq#\", \"TX#\", \"Rx#\", \"Data\", \"Std_Error\"])\n )\n self.mode = \"det\"\n self.data = None\n self.rotation_angle = 0\n\n self.data_1 = None\n self.data_1_error = None\n self.data_2 = None\n self.data_2_error = None\n\n self.save_path = Path().cwd()\n self.data_fn = self.save_path.joinpath(self._data_fn)\n\n for key in list(kwargs.keys()):\n setattr(self, key, kwargs[key])\n\n def __str__(self):\n lines = [\"Occam 1D Data:\"]\n lines.append(f\"\\tMode: {self.mode}\")\n\n return \"\\n\".join(lines)\n\n def __repr__(self):\n return self.__str__()\n\n @property\n def mode_01(self):\n if self.mode == \"te\":\n return \"RhoZxy\"\n elif self.mode == \"tm\":\n return \"RhoZyx\"\n elif self.mode == \"det\":\n return \"RhoZxy\"\n elif self.mode == \"detz\":\n return \"RealZxy\"\n elif self.mode == \"tez\":\n return \"RealZxy\"\n elif self.mode == \"tmz\":\n return \"RealZyx\"\n\n @property\n def mode_02(self):\n if self.mode == \"te\":\n return \"PhsZxy\"\n elif self.mode == \"tm\":\n return \"PhsZyx\"\n elif self.mode == \"det\":\n return \"PhsZxy\"\n elif self.mode == \"detz\":\n return \"ImagZxy\"\n elif self.mode == \"tez\":\n return \"ImagZxy\"\n elif self.mode == \"tmz\":\n return \"ImagZyx\"\n\n @property\n def mode(self):\n return self._mode\n\n @mode.setter\n def mode(self, mode):\n\n if mode not in self._acceptable_modes:\n raise ValueError(\n f\"Mode {mode} not in accetable modes {self._acceptable_modes}\"\n )\n self._mode = mode\n\n def _get_sub_dataframe(self):\n if self._mode == 
\"te\":\n sub_df = pd.DataFrame(\n {\n \"frequency\": 1.0 / self.mt_dataframe.dataframe.period,\n \"data_1\": self.mt_dataframe.dataframe.res_xy,\n \"data_1_error\": self.mt_dataframe.dataframe.res_xy_model_error,\n \"data_2\": self.mt_dataframe.dataframe.phase_xy,\n \"data_2_error\": self.mt_dataframe.dataframe.phase_xy_model_error,\n }\n )\n\n elif self._mode == \"tm\":\n sub_df = pd.DataFrame(\n {\n \"frequency\": 1.0 / self.mt_dataframe.dataframe.period,\n \"data_1\": self.mt_dataframe.dataframe.res_yx,\n \"data_1_error\": self.mt_dataframe.dataframe.res_yx_model_error,\n \"data_2\": self.mt_dataframe.dataframe.phase_yx,\n \"data_2_error\": self.mt_dataframe.dataframe.phase_yx_model_error,\n }\n )\n\n elif self._mode == \"det\":\n z_obj = self.mt_dataframe.to_z_object()\n\n sub_df = pd.DataFrame(\n {\n \"frequency\": 1.0 / self.mt_dataframe.dataframe.period,\n \"data_1\": z_obj.det.real,\n \"data_1_error\": z_obj.det_model_error,\n \"data_2\": z_obj.det.imag,\n \"data_2_error\": z_obj.det_model_error,\n }\n )\n\n elif self._mode == \"detz\":\n z_obj = self.mt_dataframe.to_z_object()\n sub_df = pd.DataFrame(\n {\n \"frequency\": 1.0 / self.mt_dataframe.dataframe.period,\n \"data_1\": z_obj.det.real * np.pi * 4e-4,\n \"data_1_error\": z_obj.det_model_error * np.pi * 4e-4,\n \"data_2\": z_obj.det.imag * np.pi * 4e-4,\n \"data_2_error\": z_obj.det_model_error * np.pi * 4e-4,\n }\n )\n\n elif self.mode == \"tez\":\n sub_df = pd.DataFrame(\n {\n \"frequency\": 1.0 / self.mt_dataframe.dataframe.period,\n \"data_1\": self.mt_dataframe.dataframe.zxy.real\n * np.pi\n * 4e-4,\n \"data_1_error\": self.mt_dataframe.dataframe.zxy_model_error\n * np.pi\n * 4e-4,\n \"data_2\": self.mt_dataframe.dataframe.zxy.imag\n * np.pi\n * 4e-4,\n \"data_2_error\": self.mt_dataframe.dataframe.zxy_model_error\n * np.pi\n * 4e-4,\n }\n )\n\n elif self.mode == \"tmz\":\n sub_df = pd.DataFrame(\n {\n \"frequency\": 1.0 / self.mt_dataframe.dataframe.period,\n \"data_1\": self.mt_dataframe.dataframe.zyx.real\n * np.pi\n * 4e-4,\n \"data_1_error\": self.mt_dataframe.dataframe.zyx_model_error\n * np.pi\n * 4e-4,\n \"data_2\": self.mt_dataframe.dataframe.zyx.imag\n * np.pi\n * 4e-4,\n \"data_2_error\": self.mt_dataframe.dataframe.zyx_model_error\n * np.pi\n * 4e-4,\n }\n )\n\n sub_df = sub_df.sort_values(\"frequency\", ascending=False).reindex()\n\n return sub_df\n\n def write_data_file(\n self,\n filename,\n mode=\"det\",\n remove_outofquadrant=False,\n ):\n \"\"\"\n make1Ddatafile will write a data file for Occam1D\n\n Arguments:\n ---------\n **rp_tuple** : np.ndarray (freq, res, res_err, phase, phase_err)\n with res, phase having shape (num_freq, 2, 2).\n\n **edi_file** : string\n full path to edi file to be modeled.\n\n **save_path** : string\n path to save the file, if None set to dirname of\n station if edipath = None. Otherwise set to\n dirname of edipath.\n\n **thetar** : float\n rotation angle to rotate Z. Clockwise positive and N=0\n *default* = 0\n\n **mode** : [ 'te' | 'tm' | 'det']\n mode to model can be (*default*='both'):\n - 'te' for just TE mode (res/phase)\n - 'tm' for just TM mode (res/phase)\n - 'det' for the determinant of Z (converted to\n res/phase)\n add 'z' to any of these options to model\n impedance tensor values instead of res/phase\n\n\n **res_err** : float\n errorbar for resistivity values. Can be set to (\n *default* = 'data'):\n\n - 'data' for errorbars from the data\n - percent number ex. 10 for ten percent\n\n **phase_err** : float\n errorbar for phase values. 
Can be set to (\n *default* = 'data'):\n\n - 'data' for errorbars from the data\n - percent number ex. 10 for ten percent\n **res_errorfloor**: float\n error floor for resistivity values\n in percent\n **phase_errorfloor**: float\n error floor for phase in degrees\n **remove_outofquadrant**: True/False; option to remove the resistivity and\n phase values for points with phases out\n of the 1st/3rd quadrant (occam requires\n 0 < phase < 90 degrees; phases in the 3rd\n quadrant are shifted to the first by\n adding 180 degrees)\n\n :Example: ::\n\n >>> import mtpy.modeling.occam1d as occam1d\n >>> #--> make a data file\n >>> d1 = occam1d.Data()\n >>> d1.write_data_file(edi_file=r'/home/MT/mt01.edi', res_err=10,\n >>> ... phase_err=2.5, mode='TE',\n >>> ... save_path=r\"/home/occam1d/mt01/TE\")\n \"\"\"\n # be sure that the input mode is not case sensitive\n self.mode = mode.lower()\n\n sub_df = self._get_sub_dataframe()\n\n if remove_outofquadrant:\n self._remove_outofquadrant_phase()\n\n # --> write file\n # make sure the savepath exists, if not create it\n self.data_fn = Path(filename)\n\n # --> write file as a list of lines\n dlines = []\n\n dlines.append(\"Format: EMData_1.1 \\n\")\n dlines.append(f\"!mode: {mode.upper()}\\n\")\n dlines.append(f\"!rotation_angle = {self.rotation_angle:.2f}\\n\")\n\n # needs a transmitter to work so put in a dummy one\n dlines.append(\"# Transmitters: 1\\n\")\n dlines.append(\"0 0 0 0 0 \\n\")\n\n nf = sub_df.frequency.size\n # write frequencies\n dlines.append(f\"# Frequencies: {nf}\\n\")\n for ff in sub_df.frequency:\n dlines.append(f\" {ff:{self._string_fmt}}\\n\")\n\n # needs a receiver to work so put in a dummy one\n dlines.append(\"# Receivers: 1 \\n\")\n dlines.append(\"0 0 0 0 0 0 \\n\")\n\n # write data\n dlines.append(f\"# Data:{self._ss}{2 * nf}\\n\")\n num_data_line = len(dlines)\n\n dlines.append(self._header_line)\n data_count = 0\n\n for row in sub_df.itertuples():\n # write lines\n dlines.append(\n self._ss.join(\n [\n self.mode_01,\n str(row.Index + 1),\n \"0\",\n \"1\",\n f\"{row.data_1:{self._string_fmt}}\",\n f\"{row.data_1_error:{self._string_fmt}}\\n\",\n ]\n )\n )\n data_count += 1\n dlines.append(\n self._ss.join(\n [\n self.mode_02,\n str(row.Index + 1),\n \"0\",\n \"1\",\n f\"{row.data_2:{self._string_fmt}}\",\n f\"{row.data_2_error:{self._string_fmt}}\\n\",\n ]\n )\n )\n data_count += 1\n\n # --> write file\n dlines[num_data_line - 1] = f\"# Data:{self._ss}{data_count}\\n\"\n\n with open(self.data_fn, \"w\") as dfid:\n dfid.writelines(dlines)\n\n self.logger.info(f\"Wrote Data File to : {self.data_fn}\")\n\n def _remove_outofquadrant_phase(self, sub_df):\n \"\"\"\n remove out of quadrant phase from data\n \"\"\"\n # remove data points with phase out of quadrant\n if \"z\" in self.mode:\n sub_df.loc[\n (sub_df.data_1 / sub_df.data_2 > 0), [\"data_1\", \"data_2\"]\n ] = 0\n\n elif self.mode in [\"det\", \"te\", \"tm\"]:\n sub_df.loc[(sub_df.data_2 % 180 < 0), \"data_2\"] = 0\n\n return sub_df\n\n def _remove_zeros(self, sub_df):\n \"\"\"\n remove zeros from the data frame\n\n :param sub_df: DESCRIPTION\n :type sub_df: TYPE\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n sub_df.loc[(sub_df != 0).any(axis=1)]\n return sub_df\n\n def read_data_file(self, data_fn):\n \"\"\"\n reads a 1D data file\n\n Arguments:\n ----------\n **data_fn** : full path to data file\n\n Returns:\n --------\n **Occam1D.rpdict** : dictionary with keys:\n\n *'freq'* : an array of frequencies with length nf\n\n *'resxy'* : TE resistivity array with shape 
(nf,4) for (0) data,\n (1) dataerr, (2) model, (3) modelerr\n\n *'resyx'* : TM resistivity array with shape (nf,4) for (0) data,\n (1) dataerr, (2) model, (3) modelerr\n\n *'phasexy'* : TE phase array with shape (nf,4) for (0) data,\n (1) dataerr, (2) model, (3) modelerr\n\n *'phaseyx'* : TM phase array with shape (nf,4) for (0) data,\n (1) dataerr, (2) model, (3) modelerr\n\n :Example: ::\n\n >>> old = occam1d.Data()\n >>> old.data_fn = r\"/home/Occam1D/Line1/Inv1_TE/MT01TE.dat\"\n >>> old.read_data_file()\n \"\"\"\n\n self.data_fn = Path(data_fn)\n if not self.data_fn.exists():\n raise IOError(f\"Could not find {self.data_fn}, check path\")\n\n self.save_path = self.data_fn.parent\n\n with open(self.data_fn, \"r\") as fid:\n dlines = fid.readlines()\n\n # make a dictionary of all the fields found so can put them into arrays\n finddict = {}\n for ii, dline in enumerate(dlines):\n if dline.find(\"#\") <= 3:\n fkey = dline[2:].strip().split(\":\")[0]\n fvalue = ii\n finddict[fkey] = fvalue\n\n # get number of frequencies\n nfreq = int(\n dlines[finddict[\"Frequencies\"]][2:].strip().split(\":\")[1].strip()\n )\n\n # frequency list\n freq = np.array(\n [\n float(ff)\n for ff in dlines[\n finddict[\"Frequencies\"] + 1 : finddict[\"Receivers\"]\n ]\n ]\n )\n\n # data dictionary to put things into\n # check to see if there is alread one, if not make a new one\n data = {\n \"frequency\": freq,\n \"zxy\": np.zeros(nfreq, dtype=complex),\n \"zyx\": np.zeros(nfreq, dtype=complex),\n \"res_xy\": np.zeros(nfreq),\n \"res_yx\": np.zeros(nfreq),\n \"phase_xy\": np.zeros(nfreq),\n \"phase_yx\": np.zeros(nfreq),\n \"zxy_model_error\": np.zeros(nfreq),\n \"zyx_model_error\": np.zeros(nfreq),\n \"res_xy_model_error\": np.zeros(nfreq),\n \"res_yx_model_error\": np.zeros(nfreq),\n \"phase_xy_model_error\": np.zeros(nfreq),\n \"phase_yx_model_error\": np.zeros(nfreq),\n }\n\n # get data\n for dline in dlines[finddict[\"Data\"] + 1 :]:\n if dline.find(\"!\") == 0:\n pass\n else:\n dlst = dline.strip().split()\n dlst = [dd.strip() for dd in dlst]\n if len(dlst) > 4:\n jj = int(dlst[1]) - 1\n dvalue = float(dlst[4])\n derr = float(dlst[5])\n if dlst[0] in [\"RhoZxy\", \"103\"]:\n self.mode = \"te\"\n data[\"res_xy\"][jj] = dvalue\n data[\"res_xy_model_error\"][jj] = derr\n elif dlst[0] in [\"PhsZxy\", \"104\"]:\n self.mode = \"te\"\n data[\"phase_xy\"][jj] = dvalue\n data[\"phase_xy_model_error\"][jj] = derr\n elif dlst[0] in [\"RhoZyx\", \"105\"]:\n self.mode = \"tm\"\n data[\"res_yx\"][jj] = dvalue\n data[\"res_yx_model_error\"][jj] = derr\n elif dlst[0] in [\"PhsZyx\", \"106\"]:\n self.mode = \"TM\"\n data[\"phase_yx\"][jj] = dvalue\n data[\"phase_yx_model_error\"][jj] = derr\n elif dlst[0] in [\"RealZxy\", \"113\"]:\n self.mode = \"tez\"\n data[\"zxy\"][jj] += dvalue / (np.pi * 4e-4)\n data[\"zxy_model_error\"][jj] = derr / (np.pi * 4e-4)\n elif dlst[0] in [\"ImagZxy\", \"114\"]:\n self.mode = \"tez\"\n data[\"zxy\"][jj] += 1j * dvalue / (np.pi * 4e-4)\n data[\"zxy_model_error\"][jj] = derr / (np.pi * 4e-4)\n elif dlst[0] in [\"RealZyx\", \"115\"]:\n self.mode = \"tmz\"\n data[\"zyx\"][jj] += dvalue / (np.pi * 4e-4)\n data[\"zyx_model_error\"][jj] = derr / (np.pi * 4e-4)\n elif dlst[0] in [\"ImagZyx\", \"116\"]:\n self.mode = \"tmz\"\n data[\"zyx\"][jj] += 1j * dvalue / (np.pi * 4e-4)\n data[\"zyx_model_error\"][jj] = derr / (np.pi * 4e-4)\n\n df = pd.DataFrame(data)\n self.mt_dataframe = MTDataFrame(data=df)\n\n def read_resp_file(self, resp_fn=None, data_fn=None):\n \"\"\"\n read response file\n\n 
Arguments:\n ---------\n **resp_fn** : full path to response file\n\n **data_fn** : full path to data file\n\n Fills:\n --------\n\n *freq* : an array of frequencies with length nf\n\n *res_te* : TE resistivity array with shape (nf,4) for (0) data,\n (1) dataerr, (2) model, (3) modelerr\n\n *res_tm* : TM resistivity array with shape (nf,4) for (0) data,\n (1) dataerr, (2) model, (3) modelerr\n\n *phase_te* : TE phase array with shape (nf,4) for (0) data,\n (1) dataerr, (2) model, (3) modelerr\n\n *phase_tm* : TM phase array with shape (nf,4) for (0) data,\n (1) dataerr, (2) model, (3) modelerr\n\n :Example: ::\n >>> o1d = occam1d.Data()\n >>> o1d.data_fn = r\"/home/occam1d/mt01/TE/Occam1D_DataFile_TE.dat\"\n >>> o1d.read_resp_file(r\"/home/occam1d/mt01/TE/TE_7.resp\")\n\n \"\"\"\n\n if resp_fn is not None:\n self.resp_fn = resp_fn\n if self.resp_fn is None:\n raise IOError(\"Need to input response file\")\n\n if data_fn is not None:\n self.data_fn = data_fn\n if self.data_fn is None:\n raise IOError(\"Need to input data file\")\n # --> read in data file\n self.read_data_file()\n\n # --> read response file\n dfid = open(self.resp_fn, \"r\")\n\n dlines = dfid.readlines()\n dfid.close()\n\n finddict = {}\n for ii, dline in enumerate(dlines):\n if dline.find(\"#\") <= 3:\n fkey = dline[2:].strip().split(\":\")[0]\n fvalue = ii\n finddict[fkey] = fvalue\n\n for dline in dlines[finddict[\"Data\"] + 1 :]:\n if dline.find(\"!\") == 0:\n pass\n else:\n dlst = dline.strip().split()\n if len(dlst) > 4:\n jj = int(dlst[1]) - 1\n dvalue = float(dlst[4])\n derr = float(dlst[5])\n rvalue = float(dlst[6])\n try:\n rerr = float(dlst[7])\n except ValueError:\n rerr = 1000.0\n if dlst[0] == \"RhoZxy\" or dlst[0] == \"103\":\n self.res_te[0, jj] = dvalue\n self.res_te[jj] = derr\n self.res_te[2, jj] = rvalue\n self.res_te[3, jj] = rerr\n if dlst[0] == \"PhsZxy\" or dlst[0] == \"104\":\n self.phase_te[0, jj] = dvalue\n self.phase_te[jj] = derr\n self.phase_te[2, jj] = rvalue\n self.phase_te[3, jj] = rerr\n if dlst[0] == \"RhoZyx\" or dlst[0] == \"105\":\n self.res_tm[0, jj] = dvalue\n self.res_tm[jj] = derr\n self.res_tm[2, jj] = rvalue\n self.res_tm[3, jj] = rerr\n if dlst[0] == \"PhsZyx\" or dlst[0] == \"106\":\n self.phase_tm[0, jj] = dvalue\n self.phase_tm[jj] = derr\n self.phase_tm[2, jj] = rvalue\n self.phase_tm[3, jj] = rerr\n if dlst[0] == \"RealZxy\" or dlst[0] == \"113\":\n self.mode = \"TEz\"\n self.data[\"zxy\"][0, jj] = dvalue / (np.pi * 4e-4)\n self.data[\"zxy\"][jj] = derr / (np.pi * 4e-4)\n self.data[\"zxy\"][2, jj] = rvalue / (np.pi * 4e-4)\n self.data[\"zxy\"][3, jj] = rerr\n if dlst[0] == \"ImagZxy\" or dlst[0] == \"114\":\n self.mode = \"TEz\"\n self.data[\"zxy\"][0, jj] += 1j * dvalue / (np.pi * 4e-4)\n self.data[\"zxy\"][jj] = derr / (np.pi * 4e-4)\n self.data[\"zxy\"][2, jj] += 1j * rvalue / (np.pi * 4e-4)\n self.data[\"zxy\"][3, jj] = rerr\n if dlst[0] == \"RealZyx\" or dlst[0] == \"115\":\n self.mode = \"TMz\"\n self.data[\"zyx\"][0, jj] = dvalue / (np.pi * 4e-4)\n self.data[\"zyx\"][jj] = derr / (np.pi * 4e-4)\n self.data[\"zyx\"][2, jj] = rvalue / (np.pi * 4e-4)\n self.data[\"zyx\"][3, jj] = rerr\n if dlst[0] == \"ImagZyx\" or dlst[0] == \"116\":\n self.mode = \"TMz\"\n self.data[\"zyx\"][0, jj] += 1j * dvalue / (np.pi * 4e-4)\n self.data[\"zyx\"][jj] = derr / (np.pi * 4e-4)\n self.data[\"zyx\"][2, jj] += 1j * rvalue / (np.pi * 4e-4)\n self.data[\"zyx\"][3, jj] = rerr\n if \"z\" in self.mode:\n if \"TE\" in self.mode:\n pol = \"xy\"\n elif \"TM\" in self.mode:\n pol = \"yx\"\n for ii 
in [0, 2]:\n self.data[\"res\" + pol][0 + ii] = (\n 0.2\n * np.abs(self.data[\"z\" + pol][0 + ii]) ** 2.0\n / self.freq\n )\n self.data[\"phase\" + pol][0 + ii] = np.rad2deg(\n np.arctan(\n self.data[\"z\" + pol][0 + ii].imag\n / self.data[\"z\" + pol][0 + ii].real\n )\n )\n\n self.data[\"res\" + pol][1 + ii] = (\n self.data[\"res\" + pol][0 + ii]\n * self.data[\"z\" + pol][1 + ii].real\n / np.abs(self.data[\"z\" + pol][0 + ii])\n )\n\n for jjj in range(len(self.freq)):\n self.data[\"phase\" + pol][\n 1 + ii, jjj\n ] = mtcc.z_error2r_phi_error(\n self.data[\"z\" + pol][0 + ii, jjj].real,\n self.data[\"z\" + pol][0 + ii, jjj].imag,\n self.data[\"z\" + pol][1 + ii, jjj].real,\n )[\n 1\n ]\n if pol == \"xy\":\n self.res_te = self.data[\"resxy\"]\n self.phase_te = self.data[\"phasexy\"]\n elif pol == \"yx\":\n self.res_tm = self.data[\"resyx\"]\n self.phase_tm = self.data[\"phaseyx\"]" }, { "identifier": "Occam1DModel", "path": "mtpy/modeling/occam1d/model.py", "snippet": "class Occam1DModel(object):\n \"\"\"\n read and write the model file fo Occam1D\n\n All depth measurements are in meters.\n\n ======================== ==================================================\n Attributes Description\n ======================== ==================================================\n _model_fn basename for model file *default* is Model1D\n _ss string spacing in model file *default* is 3*' '\n _string_fmt format of model layers *default* is '.0f'\n air_layer_height height of air layer *default* is 10000\n bottom_layer bottom of the model *default* is 50000\n itdict dictionary of values from iteration file\n iter_fn full path to iteration file\n model_depth array of model depths\n model_fn full path to model file\n model_penalty array of penalties for each model layer\n model_preference_penalty array of model preference penalties for each layer\n model_prefernce array of preferences for each layer\n model_res array of resistivities for each layer\n n_layers number of layers in the model\n num_params number of parameters to invert for (n_layers+2)\n pad_z padding of model at depth *default* is 5 blocks\n save_path path to save files\n target_depth depth of target to investigate\n z1_layer depth of first layer *default* is 10\n ======================== ==================================================\n\n ======================== ==================================================\n Methods Description\n ======================== ==================================================\n write_model_file write an Occam1D model file, where depth increases\n on a logarithmic scale\n read_model_file read an Occam1D model file\n read_iter_file read an .iter file output by Occam1D\n ======================== ==================================================\n\n :Example: ::\n\n >>> #--> make a model file\n >>> m1 = occam1d.Model()\n >>> m1.write_model_file(save_path=r\"/home/occam1d/mt01/TE\")\n \"\"\"\n\n def __init__(self, model_fn=None, **kwargs):\n self.model_fn = model_fn\n self.iter_fn = None\n\n self.n_layers = kwargs.pop(\"n_layers\", 100)\n self.bottom_layer = kwargs.pop(\"bottom_layer\", None)\n self.target_depth = kwargs.pop(\"target_depth\", None)\n self.pad_z = kwargs.pop(\"pad_z\", 5)\n self.z1_layer = kwargs.pop(\"z1_layer\", 10)\n self.air_layer_height = kwargs.pop(\"zir_layer_height\", 10000)\n self._set_layerdepth_defaults()\n\n self.save_path = kwargs.pop(\"save_path\", None)\n if self.model_fn is not None and self.save_path is None:\n self.save_path = os.path.dirname(self.model_fn)\n\n self._ss = \" 
\" * 3\n self._string_fmt = \".0f\"\n self._model_fn = \"Model1D\"\n self.model_res = None\n self.model_depth = None\n self.model_penalty = None\n self.model_prefernce = None\n self.model_preference_penalty = None\n self.num_params = None\n\n def _set_layerdepth_defaults(\n self, z1_threshold=3.0, bottomlayer_threshold=2.0\n ):\n \"\"\"\n set target depth, bottom layer and z1 layer, making sure all the layers\n are consistent with each other and will work in the inversion\n (e.g. check target depth is not deeper than bottom layer)\n \"\"\"\n\n if self.target_depth is None:\n if self.bottom_layer is None:\n # if neither target_depth nor bottom_layer are set, set defaults\n self.target_depth = 10000.0\n else:\n self.target_depth = mtcc.roundsf(self.bottom_layer / 5.0, 1.0)\n\n if self.bottom_layer is None:\n self.bottom_layer = 5.0 * self.target_depth\n # if bottom layer less than a factor of 2 greater than target depth then adjust deeper\n elif (\n float(self.bottom_layer) / self.target_depth < bottomlayer_threshold\n ):\n self.bottom_layer = bottomlayer_threshold * self.target_depth\n print(\n \"bottom layer not deep enough for target depth, set to {} m\".format(\n self.bottom_layer\n )\n )\n\n if self.z1_layer is None:\n self.z1_layer = mtcc.roundsf(self.target_depth / 1000.0, 0)\n elif self.target_depth / self.z1_layer < z1_threshold:\n self.z1_layer = self.target_depth / z1_threshold\n print(\n f\"z1 layer not deep enough for target depth, set to {self.z1_layer} m\"\n )\n\n def write_model_file(self, save_path=None, **kwargs):\n \"\"\"\n Makes a 1D model file for Occam1D.\n\n Arguments:\n ----------\n\n **save_path** :path to save file to, if just path saved as\n savepath\\model.mod, if None defaults to dirpath\n\n **n_layers** : number of layers\n\n **bottom_layer** : depth of bottom layer in meters\n\n **target_depth** : depth to target under investigation\n\n **pad_z** : padding on bottom of model past target_depth\n\n **z1_layer** : depth of first layer in meters\n\n **air_layer_height** : height of air layers in meters\n\n Returns:\n --------\n\n **Occam1D.modelfn** = full path to model file\n\n ..Note: This needs to be redone.\n\n :Example: ::\n\n >>> old = occam.Occam1D()\n >>> old.make1DModelFile(savepath=r\"/home/Occam1D/Line1/Inv1_TE\",\n >>> nlayers=50,bottomlayer=10000,z1layer=50)\n >>> Wrote Model file: /home/Occam1D/Line1/Inv1_TE/Model1D\n \"\"\"\n if save_path is not None:\n self.save_path = save_path\n if os.path.isdir == False:\n os.mkdir(self.save_path)\n\n self.model_fn = os.path.join(self.save_path, self._model_fn)\n\n for key in list(kwargs.keys()):\n setattr(self, key, kwargs[key])\n\n if self.model_depth is None:\n # ---------create depth layers--------------------\n log_z = np.logspace(\n np.log10(self.z1_layer),\n np.log10(\n self.target_depth\n - np.logspace(\n np.log10(self.z1_layer),\n np.log10(self.target_depth),\n num=self.n_layers,\n )[-2]\n ),\n num=self.n_layers - self.pad_z,\n )\n ztarget = np.array(\n [zz - zz % 10 ** np.floor(np.log10(zz)) for zz in log_z]\n )\n log_zpad = np.logspace(\n np.log10(self.target_depth),\n np.log10(\n self.bottom_layer\n - np.logspace(\n np.log10(self.target_depth),\n np.log10(self.bottom_layer),\n num=self.pad_z,\n )[-2]\n ),\n num=self.pad_z,\n )\n zpadding = np.array(\n [zz - zz % 10 ** np.floor(np.log10(zz)) for zz in log_zpad]\n )\n z_nodes = np.append(ztarget, zpadding)\n self.model_depth = np.array(\n [z_nodes[: ii + 1].sum() for ii in range(z_nodes.shape[0])]\n )\n else:\n self.n_layers = len(self.model_depth)\n\n 
self.num_params = self.n_layers + 2\n # make the model file\n modfid = open(self.model_fn, \"w\")\n modfid.write(\"Format: Resistivity1DMod_1.0\" + \"\\n\")\n modfid.write(f\"#LAYERS: {self.num_params}\\n\")\n modfid.write(\"!Set free values to -1 or ? \\n\")\n modfid.write(\n \"!penalize between 1 and 0,\"\n + \"0 allowing jump between layers and 1 smooth. \\n\"\n )\n modfid.write(\n \"!preference is the assumed resistivity on linear scale. \\n\"\n )\n modfid.write(\n \"!pref_penalty needs to be put if preference is not 0 [0,1]. \\n\"\n )\n modfid.write(\n \"! {0}\\n\".format(\n self._ss.join(\n [\n \"top_depth\",\n \"resistivity\",\n \"penalty\",\n \"preference\",\n \"pref_penalty\",\n ]\n )\n )\n )\n modfid.write(\n self._ss.join(\n [\n str(-self.air_layer_height),\n \"1d12\",\n \"0\",\n \"0\",\n \"0\",\n \"!air layer\",\n \"\\n\",\n ]\n )\n )\n modfid.write(\n self._ss.join(\n [\"0\", \"-1\", \"0\", \"0\", \"0\", \"!first ground layer\", \"\\n\"]\n )\n )\n for ll in self.model_depth:\n modfid.write(\n self._ss.join(\n [\n f\"{np.ceil(ll):{{1}}}\",\n \"-1\",\n \"1\",\n \"0\",\n \"0\",\n \"\\n\",\n ]\n )\n )\n\n modfid.close()\n\n print(f\"Wrote Model file: {self.model_fn}\")\n\n def read_model_file(self, model_fn=None):\n \"\"\"\n\n will read in model 1D file\n\n Arguments:\n ----------\n **modelfn** : full path to model file\n\n Fills attributes:\n --------\n\n * model_depth' : depth of model in meters\n\n * model_res : value of resisitivity\n\n * model_penalty : penalty\n\n * model_preference : preference\n\n * model_penalty_preference : preference penalty\n\n :Example: ::\n\n >>> m1 = occam1d.Model()\n >>> m1.savepath = r\"/home/Occam1D/Line1/Inv1_TE\"\n >>> m1.read_model_file()\n \"\"\"\n if model_fn is not None:\n self.model_fn = model_fn\n if self.model_fn is None:\n raise IOError(\"Need to input a model file\")\n elif os.path.isfile(self.model_fn) == False:\n raise IOError(f\"Could not find{self.model_fn}, check path\")\n\n self._model_fn = os.path.basename(self.model_fn)\n self.save_path = os.path.dirname(self.model_fn)\n mfid = open(self.model_fn, \"r\")\n mlines = mfid.readlines()\n mfid.close()\n mdict = {}\n mdict[\"nparam\"] = 0\n for key in [\"depth\", \"res\", \"pen\", \"pref\", \"prefpen\"]:\n mdict[key] = []\n\n for mm, mline in enumerate(mlines):\n if mline.find(\"!\") == 0:\n pass\n elif mline.find(\":\") >= 0:\n mlst = mline.strip().split(\":\")\n mdict[mlst[0]] = mlst[1]\n else:\n mlst = mline.strip().split()\n mdict[\"depth\"].append(float(mlst[0]))\n if mlst[1] == \"?\":\n mdict[\"res\"].append(-1)\n elif mlst[1] == \"1d12\":\n mdict[\"res\"].append(1.0e12)\n else:\n try:\n mdict[\"res\"].append(float(mlst[1]))\n except ValueError:\n mdict[\"res\"].append(-1)\n mdict[\"pen\"].append(float(mlst[2]))\n mdict[\"pref\"].append(float(mlst[3]))\n mdict[\"prefpen\"].append(float(mlst[4]))\n if mlst[1] == \"-1\" or mlst[1] == \"?\":\n mdict[\"nparam\"] += 1\n\n # make everything an array\n for key in [\"depth\", \"res\", \"pen\", \"pref\", \"prefpen\"]:\n mdict[key] = np.array(mdict[key])\n\n # create an array with empty columns to put the TE and TM models into\n mres = np.zeros((len(mdict[\"res\"]), 2))\n mres[:, 0] = mdict[\"res\"]\n mdict[\"res\"] = mres\n\n # make attributes\n self.model_res = mdict[\"res\"]\n self.model_depth = mdict[\"depth\"]\n self.model_penalty = mdict[\"pen\"]\n self.model_prefernce = mdict[\"pref\"]\n self.model_preference_penalty = mdict[\"prefpen\"]\n self.num_params = mdict[\"nparam\"]\n\n def read_iter_file(self, iter_fn=None, 
model_fn=None):\n \"\"\"\n read an 1D iteration file\n\n Arguments:\n ----------\n **imode** : mode to read from\n\n Returns:\n --------\n **Occam1D.itdict** : dictionary with keys of the header:\n\n **model_res** : fills this array with the appropriate\n values (0) for data, (1) for model\n\n :Example: ::\n\n >>> m1 = occam1d.Model()\n >>> m1.model_fn = r\"/home/occam1d/mt01/TE/Model1D\"\n >>> m1.read_iter_file(r\"/home/Occam1D/Inv1_TE/M01TE_15.iter\")\n\n \"\"\"\n\n if iter_fn is not None:\n self.iter_fn = iter_fn\n\n if self.iter_fn is None:\n raise IOError(\"Need to input iteration file\")\n\n if model_fn is not None:\n self.model_fn = model_fn\n if self.model_fn is None:\n raise IOError(\"Need to input a model file\")\n else:\n self.read_model_file()\n\n freeparams = np.where(self.model_res == -1)[0]\n\n with open(self.iter_fn, \"r\") as ifid:\n ilines = ifid.readlines()\n\n self.itdict = {}\n model = []\n for ii, iline in enumerate(ilines):\n if iline.find(\":\") >= 0:\n ikey = iline[0:20].strip()\n ivalue = iline[20:].split(\"!\")[0].strip()\n self.itdict[ikey[:-1]] = ivalue\n else:\n try:\n ilst = iline.strip().split()\n for kk in ilst:\n model.append(float(kk))\n except ValueError:\n pass\n\n # put the model values into the model dictionary into the res array\n # for easy manipulation and access.\n model = np.array(model)\n self.model_res[freeparams, 1] = model" } ]
from pathlib import Path from mtpy.modeling.occam1d import Occam1DData, Occam1DModel import time import numpy as np
12,107
self.startup_fn = None self.rough_type = 1 self.max_iter = 20 self.target_rms = 1 self.start_rho = 100 self.description = "1D_Occam_Inv" self.start_lagrange = 5.0 self.start_rough = 1.0e7 self.debug_level = 1 self.start_iter = 0 self.start_misfit = 100 self.min_max_bounds = None self.model_step = None self._startup_fn = "OccamStartup1D" self._ss = " " * 3 for key, value in kwargs.items(): setattr(self, key, value) @property def data_fn(self): return self._data_fn @data_fn.setter def data_fn(self, fn): if fn is not None: self._data_fn = Path(fn) else: self._data_fn = None @property def model_fn(self): return self._model_fn @model_fn.setter def model_fn(self, fn): if fn is not None: self._model_fn = Path(fn) else: self._model_fn = None def write_startup_file(self, save_path=None, **kwargs): """ Make a 1D input file for Occam 1D Arguments: --------- **savepath** : full path to save input file to, if just path then saved as savepath/input **model_fn** : full path to model file, if None then assumed to be in savepath/model.mod **data_fn** : full path to data file, if None then assumed to be in savepath/TE.dat or TM.dat **rough_type** : roughness type. *default* = 0 **max_iter** : maximum number of iterations. *default* = 20 **target_rms** : target rms value. *default* = 1.0 **start_rho** : starting resistivity value on linear scale. *default* = 100 **description** : description of the inversion. **start_lagrange** : starting Lagrange multiplier for smoothness. *default* = 5 **start_rough** : starting roughness value. *default* = 1E7 **debuglevel** : something to do with how Fortran debuggs the code Almost always leave at *default* = 1 **start_iter** : the starting iteration number, handy if the starting model is from a previous run. *default* = 0 **start_misfit** : starting misfit value. *default* = 100 Returns: -------- **Occam1D.inputfn** : full path to input file. :Example: :: >>> old = occam.Occam1D() >>> old.make1DdataFile('MT01',edipath=r"/home/Line1", >>> savepath=r"/home/Occam1D/Line1/Inv1_TE", >>> mode='TE') >>> Wrote Data File: /home/Occam1D/Line1/Inv1_TE/MT01TE.dat >>> >>> old.make1DModelFile(savepath=r"/home/Occam1D/Line1/Inv1_TE", >>> nlayers=50,bottomlayer=10000,z1layer=50) >>> Wrote Model file: /home/Occam1D/Line1/Inv1_TE/Model1D >>> >>> old.make1DInputFile(rhostart=10,targetrms=1.5,maxiter=15) >>> Wrote Input File: /home/Occam1D/Line1/Inv1_TE/Input1D """ if save_path is not None: self.save_path = save_path if not self.save_path.is_dir(): self.save_path.mkdir() self.startup_fn = self.save_path.joinpath(self._startup_fn) # --> read data file if self.data_fn is None: raise IOError("Need to input data file name.") else: data = Occam1DData() data.read_data_file(self.data_fn) # --> read model file if self.model_fn is None: raise IOError("Need to input model file name.") else:
# -*- coding: utf-8 -*- """ Created on Mon Oct 30 13:32:42 2023 @author: jpeacock """ # ============================================================================= # Imports # ============================================================================= # ============================================================================= class Occam1DStartup(object): """ read and write input files for Occam1D ====================== ==================================================== Attributes Description ====================== ==================================================== _ss string spacing _startup_fn basename of startup file *default* is OccamStartup1D data_fn full path to data file debug_level debug level *default* is 1 description description of inversion for your self *default* is 1D_Occam_Inv max_iter maximum number of iterations *default* is 20 model_fn full path to model file rough_type roughness type *default* is 1 save_path full path to save files to start_iter first iteration number *default* is 0 start_lagrange starting lagrange number on log scale *default* is 5 start_misfit starting misfit value *default* is 100 start_rho starting resistivity value (halfspace) in log scale *default* is 100 start_rough starting roughness (ignored by Occam1D) *default* is 1E7 startup_fn full path to startup file target_rms target rms *default* is 1.0 ====================== ==================================================== """ def __init__(self, data_fn=None, model_fn=None, **kwargs): self.data_fn = data_fn self.model_fn = model_fn if self.data_fn is not None: self.save_path = self.data_fn.parent elif self.model_fn is not None: self.save_path = self.model_fn.parent self.startup_fn = None self.rough_type = 1 self.max_iter = 20 self.target_rms = 1 self.start_rho = 100 self.description = "1D_Occam_Inv" self.start_lagrange = 5.0 self.start_rough = 1.0e7 self.debug_level = 1 self.start_iter = 0 self.start_misfit = 100 self.min_max_bounds = None self.model_step = None self._startup_fn = "OccamStartup1D" self._ss = " " * 3 for key, value in kwargs.items(): setattr(self, key, value) @property def data_fn(self): return self._data_fn @data_fn.setter def data_fn(self, fn): if fn is not None: self._data_fn = Path(fn) else: self._data_fn = None @property def model_fn(self): return self._model_fn @model_fn.setter def model_fn(self, fn): if fn is not None: self._model_fn = Path(fn) else: self._model_fn = None def write_startup_file(self, save_path=None, **kwargs): """ Make a 1D input file for Occam 1D Arguments: --------- **savepath** : full path to save input file to, if just path then saved as savepath/input **model_fn** : full path to model file, if None then assumed to be in savepath/model.mod **data_fn** : full path to data file, if None then assumed to be in savepath/TE.dat or TM.dat **rough_type** : roughness type. *default* = 0 **max_iter** : maximum number of iterations. *default* = 20 **target_rms** : target rms value. *default* = 1.0 **start_rho** : starting resistivity value on linear scale. *default* = 100 **description** : description of the inversion. **start_lagrange** : starting Lagrange multiplier for smoothness. *default* = 5 **start_rough** : starting roughness value. *default* = 1E7 **debuglevel** : something to do with how Fortran debuggs the code Almost always leave at *default* = 1 **start_iter** : the starting iteration number, handy if the starting model is from a previous run. *default* = 0 **start_misfit** : starting misfit value. 
*default* = 100 Returns: -------- **Occam1D.inputfn** : full path to input file. :Example: :: >>> old = occam.Occam1D() >>> old.make1DdataFile('MT01',edipath=r"/home/Line1", >>> savepath=r"/home/Occam1D/Line1/Inv1_TE", >>> mode='TE') >>> Wrote Data File: /home/Occam1D/Line1/Inv1_TE/MT01TE.dat >>> >>> old.make1DModelFile(savepath=r"/home/Occam1D/Line1/Inv1_TE", >>> nlayers=50,bottomlayer=10000,z1layer=50) >>> Wrote Model file: /home/Occam1D/Line1/Inv1_TE/Model1D >>> >>> old.make1DInputFile(rhostart=10,targetrms=1.5,maxiter=15) >>> Wrote Input File: /home/Occam1D/Line1/Inv1_TE/Input1D """ if save_path is not None: self.save_path = save_path if not self.save_path.is_dir(): self.save_path.mkdir() self.startup_fn = self.save_path.joinpath(self._startup_fn) # --> read data file if self.data_fn is None: raise IOError("Need to input data file name.") else: data = Occam1DData() data.read_data_file(self.data_fn) # --> read model file if self.model_fn is None: raise IOError("Need to input model file name.") else:
model = Occam1DModel()
1
2023-10-11 22:24:50+00:00
16k
Jacoo-ai/HIC-Yolov5
detect.py
[ { "identifier": "attempt_load", "path": "models/experimental.py", "snippet": "def attempt_load(weights, map_location=None, inplace=True, fuse=True):\n from models.yolo import Detect, Model\n\n # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a\n model = Ensemble()\n for w in weights if isinstance(weights, list) else [weights]:\n ckpt = torch.load(attempt_download(w), map_location=map_location) # load\n if fuse:\n model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model\n else:\n model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().eval()) # without layer fuse\n\n # Compatibility updates\n for m in model.modules():\n if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model]:\n m.inplace = inplace # pytorch 1.7.0 compatibility\n if type(m) is Detect:\n if not isinstance(m.anchor_grid, list): # new Detect Layer compatibility\n delattr(m, 'anchor_grid')\n setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl)\n elif type(m) is Conv:\n m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility\n\n if len(model) == 1:\n return model[-1] # return model\n else:\n print(f'Ensemble created with {weights}\\n')\n for k in ['names']:\n setattr(model, k, getattr(model[-1], k))\n model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride # max stride\n return model # return ensemble" }, { "identifier": "LoadImages", "path": "utils/datasets.py", "snippet": "class LoadImages:\n # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`\n def __init__(self, path, img_size=640, stride=32, auto=True):\n p = str(Path(path).resolve()) # os-agnostic absolute path\n if '*' in p:\n files = sorted(glob.glob(p, recursive=True)) # glob\n elif os.path.isdir(p):\n files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir\n elif os.path.isfile(p):\n files = [p] # files\n else:\n raise Exception(f'ERROR: {p} does not exist')\n\n images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]\n videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]\n ni, nv = len(images), len(videos)\n\n self.img_size = img_size\n self.stride = stride\n self.files = images + videos\n self.nf = ni + nv # number of files\n self.video_flag = [False] * ni + [True] * nv\n self.mode = 'image'\n self.auto = auto\n if any(videos):\n self.new_video(videos[0]) # new video\n else:\n self.cap = None\n assert self.nf > 0, f'No images or videos found in {p}. 
' \\\n f'Supported formats are:\\nimages: {IMG_FORMATS}\\nvideos: {VID_FORMATS}'\n\n def __iter__(self):\n self.count = 0\n return self\n\n def __next__(self):\n if self.count == self.nf:\n raise StopIteration\n path = self.files[self.count]\n\n if self.video_flag[self.count]:\n # Read video\n self.mode = 'video'\n ret_val, img0 = self.cap.read()\n if not ret_val:\n self.count += 1\n self.cap.release()\n if self.count == self.nf: # last video\n raise StopIteration\n else:\n path = self.files[self.count]\n self.new_video(path)\n ret_val, img0 = self.cap.read()\n\n self.frame += 1\n print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ', end='')\n\n else:\n # Read image\n self.count += 1\n img0 = cv2.imread(path) # BGR\n assert img0 is not None, 'Image Not Found ' + path\n print(f'image {self.count}/{self.nf} {path}: ', end='')\n\n # Padded resize\n img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0]\n\n # Convert\n img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB\n img = np.ascontiguousarray(img)\n\n return path, img, img0, self.cap\n\n def new_video(self, path):\n self.frame = 0\n self.cap = cv2.VideoCapture(path)\n self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\n def __len__(self):\n return self.nf # number of files" }, { "identifier": "LoadStreams", "path": "utils/datasets.py", "snippet": "class LoadStreams:\n # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`\n def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True):\n self.mode = 'stream'\n self.img_size = img_size\n self.stride = stride\n\n if os.path.isfile(sources):\n with open(sources, 'r') as f:\n sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]\n else:\n sources = [sources]\n\n n = len(sources)\n self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n\n self.sources = [clean_str(x) for x in sources] # clean source names for later\n self.auto = auto\n for i, s in enumerate(sources): # index, source\n # Start thread to read frames from video stream\n print(f'{i + 1}/{n}: {s}... ', end='')\n if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video\n check_requirements(('pafy', 'youtube_dl'))\n import pafy\n s = pafy.new(s).getbest(preftype=\"mp4\").url # YouTube URL\n s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam\n cap = cv2.VideoCapture(s)\n assert cap.isOpened(), f'Failed to open {s}'\n w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0 # 30 FPS fallback\n self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback\n\n _, self.imgs[i] = cap.read() # guarantee first frame\n self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True)\n print(f\" success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)\")\n self.threads[i].start()\n print('') # newline\n\n # check for common shapes\n s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs])\n self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal\n if not self.rect:\n print('WARNING: Different stream shapes detected. 
For optimal performance supply similarly-shaped streams.')\n\n def update(self, i, cap, stream):\n # Read stream `i` frames in daemon thread\n n, f, read = 0, self.frames[i], 1 # frame number, frame array, inference every 'read' frame\n while cap.isOpened() and n < f:\n n += 1\n # _, self.imgs[index] = cap.read()\n cap.grab()\n if n % read == 0:\n success, im = cap.retrieve()\n if success:\n self.imgs[i] = im\n else:\n print('WARNING: Video stream unresponsive, please check your IP camera connection.')\n self.imgs[i] *= 0\n cap.open(stream) # re-open stream if signal was lost\n time.sleep(1 / self.fps[i]) # wait time\n\n def __iter__(self):\n self.count = -1\n return self\n\n def __next__(self):\n self.count += 1\n if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit\n cv2.destroyAllWindows()\n raise StopIteration\n\n # Letterbox\n img0 = self.imgs.copy()\n img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0]\n\n # Stack\n img = np.stack(img, 0)\n\n # Convert\n img = img[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW\n img = np.ascontiguousarray(img)\n\n return self.sources, img, img0, None\n\n def __len__(self):\n return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years" }, { "identifier": "apply_classifier", "path": "utils/general.py", "snippet": "def apply_classifier(x, model, img, im0):\n # Apply a second stage classifier to yolo outputs\n im0 = [im0] if isinstance(im0, np.ndarray) else im0\n for i, d in enumerate(x): # per image\n if d is not None and len(d):\n d = d.clone()\n\n # Reshape and pad cutouts\n b = xyxy2xywh(d[:, :4]) # boxes\n b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square\n b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad\n d[:, :4] = xywh2xyxy(b).long()\n\n # Rescale boxes from img_size to im0 size\n scale_coords(img.shape[2:], d[:, :4], im0[i].shape)\n\n # Classes\n pred_cls1 = d[:, 5].long()\n ims = []\n for j, a in enumerate(d): # per item\n cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]\n im = cv2.resize(cutout, (224, 224)) # BGR\n # cv2.imwrite('example%i.jpg' % j, cutout)\n\n im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416\n im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32\n im /= 255.0 # 0 - 255 to 0.0 - 1.0\n ims.append(im)\n\n pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction\n x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections\n\n return x" }, { "identifier": "check_img_size", "path": "utils/general.py", "snippet": "def check_img_size(imgsz, s=32, floor=0):\n # Verify image size is a multiple of stride s in each dimension\n if isinstance(imgsz, int): # integer i.e. img_size=640\n new_size = max(make_divisible(imgsz, int(s)), floor)\n else: # list i.e. 
img_size=[640, 480]\n new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz]\n if new_size != imgsz:\n print(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}')\n return new_size" }, { "identifier": "check_imshow", "path": "utils/general.py", "snippet": "def check_imshow():\n # Check if environment supports image displays\n try:\n assert not is_docker(), 'cv2.imshow() is disabled in Docker environments'\n assert not is_colab(), 'cv2.imshow() is disabled in Google Colab environments'\n cv2.imshow('test', np.zeros((1, 1, 3)))\n cv2.waitKey(1)\n cv2.destroyAllWindows()\n cv2.waitKey(1)\n return True\n except Exception as e:\n print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\\n{e}')\n return False" }, { "identifier": "check_requirements", "path": "utils/general.py", "snippet": "@try_except\ndef check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True):\n # Check installed dependencies meet requirements (pass *.txt file or list of packages)\n prefix = colorstr('red', 'bold', 'requirements:')\n check_python() # check python version\n if isinstance(requirements, (str, Path)): # requirements.txt file\n file = Path(requirements)\n assert file.exists(), f\"{prefix} {file.resolve()} not found, check failed.\"\n requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude]\n else: # list or tuple of packages\n requirements = [x for x in requirements if x not in exclude]\n\n n = 0 # number of packages updates\n for r in requirements:\n try:\n pkg.require(r)\n except Exception as e: # DistributionNotFound or VersionConflict if requirements not met\n s = f\"{prefix} {r} not found and is required by YOLOv5\"\n if install:\n print(f\"{s}, attempting auto-update...\")\n try:\n assert check_online(), f\"'pip install {r}' skipped (offline)\"\n print(check_output(f\"pip install '{r}'\", shell=True).decode())\n n += 1\n except Exception as e:\n print(f'{prefix} {e}')\n else:\n print(f'{s}. Please install and rerun your command.')\n\n if n: # if packages updated\n source = file.resolve() if 'file' in locals() else requirements\n s = f\"{prefix} {n} package{'s' * (n > 1)} updated per {source}\\n\" \\\n f\"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\\n\"\n print(emojis(s))" }, { "identifier": "check_suffix", "path": "utils/general.py", "snippet": "def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''):\n # Check file(s) for acceptable suffixes\n if file and suffix:\n if isinstance(suffix, str):\n suffix = [suffix]\n for f in file if isinstance(file, (list, tuple)) else [file]:\n assert Path(f).suffix.lower() in suffix, f\"{msg}{f} acceptable suffix is {suffix}\"" }, { "identifier": "colorstr", "path": "utils/general.py", "snippet": "def colorstr(*input):\n # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. 
colorstr('blue', 'hello world')\n *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string\n colors = {'black': '\\033[30m', # basic colors\n 'red': '\\033[31m',\n 'green': '\\033[32m',\n 'yellow': '\\033[33m',\n 'blue': '\\033[34m',\n 'magenta': '\\033[35m',\n 'cyan': '\\033[36m',\n 'white': '\\033[37m',\n 'bright_black': '\\033[90m', # bright colors\n 'bright_red': '\\033[91m',\n 'bright_green': '\\033[92m',\n 'bright_yellow': '\\033[93m',\n 'bright_blue': '\\033[94m',\n 'bright_magenta': '\\033[95m',\n 'bright_cyan': '\\033[96m',\n 'bright_white': '\\033[97m',\n 'end': '\\033[0m', # misc\n 'bold': '\\033[1m',\n 'underline': '\\033[4m'}\n return ''.join(colors[x] for x in args) + f'{string}' + colors['end']" }, { "identifier": "increment_path", "path": "utils/general.py", "snippet": "def increment_path(path, exist_ok=False, sep='', mkdir=False):\n # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.\n path = Path(path) # os-agnostic\n if path.exists() and not exist_ok:\n suffix = path.suffix\n path = path.with_suffix('')\n dirs = glob.glob(f\"{path}{sep}*\") # similar paths\n matches = [re.search(rf\"%s{sep}(\\d+)\" % path.stem, d) for d in dirs]\n i = [int(m.groups()[0]) for m in matches if m] # indices\n n = max(i) + 1 if i else 2 # increment number\n path = Path(f\"{path}{sep}{n}{suffix}\") # update path\n dir = path if path.suffix == '' else path.parent # directory\n if not dir.exists() and mkdir:\n dir.mkdir(parents=True, exist_ok=True) # make directory\n return path" }, { "identifier": "non_max_suppression", "path": "utils/general.py", "snippet": "def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,\n labels=(), max_det=300):\n \"\"\"Runs Non-Maximum Suppression (NMS) on inference results\n\n Returns:\n list of detections, on (n,6) tensor per image [xyxy, conf, cls]\n \"\"\"\n\n nc = prediction.shape[2] - 5 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n\n # Checks\n assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'\n assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'\n\n # Settings\n min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\n max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()\n time_limit = 10.0 # seconds to quit after\n redundant = True # require redundant detections\n multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)\n merge = False # use merge-NMS\n\n t = time.time()\n output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence\n\n # Cat apriori labels if autolabelling\n if labels and len(labels[xi]):\n l = labels[xi]\n v = torch.zeros((len(l), nc + 5), device=x.device)\n v[:, :4] = l[:, 1:5] # box\n v[:, 4] = 1.0 # conf\n v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls\n x = torch.cat((x, v), 0)\n\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n box = xywh2xyxy(x[:, :4])\n\n # Detections matrix nx6 (xyxy, conf, cls)\n if multi_label:\n i, j = (x[:, 5:] > 
conf_thres).nonzero(as_tuple=False).T\n x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)\n else: # best class only\n conf, j = x[:, 5:].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n\n # Filter by class\n if classes is not None:\n x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\n\n # Apply finite constraint\n # if not torch.isfinite(x).all():\n # x = x[torch.isfinite(x).all(1)]\n\n # Check shape\n n = x.shape[0] # number of boxes\n if not n: # no boxes\n continue\n elif n > max_nms: # excess boxes\n x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence\n\n # Batched NMS\n c = x[:, 5:6] * (0 if agnostic else max_wh) # classes\n boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores\n i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS\n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)\n # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)\n iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix\n weights = iou * scores[None] # box weights\n x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes\n if redundant:\n i = i[iou.sum(1) > 1] # require redundancy\n\n output[xi] = x[i]\n if (time.time() - t) > time_limit:\n print(f'WARNING: NMS time limit {time_limit}s exceeded')\n break # time limit exceeded\n\n return output" }, { "identifier": "print_args", "path": "utils/general.py", "snippet": "def print_args(name, opt):\n # Print argparser arguments\n print(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))" }, { "identifier": "save_one_box", "path": "utils/general.py", "snippet": "def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False, save=True):\n # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. 
Save and/or return crop\n xyxy = torch.tensor(xyxy).view(-1, 4)\n b = xyxy2xywh(xyxy) # boxes\n if square:\n b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square\n b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad\n xyxy = xywh2xyxy(b).long()\n clip_coords(xyxy, im.shape)\n crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)]\n if save:\n cv2.imwrite(str(increment_path(file, mkdir=True).with_suffix('.jpg')), crop)\n return crop" }, { "identifier": "scale_coords", "path": "utils/general.py", "snippet": "def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):\n # Rescale coords (xyxy) from img1_shape to img0_shape\n if ratio_pad is None: # calculate from img0_shape\n gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new\n pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding\n else:\n gain = ratio_pad[0][0]\n pad = ratio_pad[1]\n\n coords[:, [0, 2]] -= pad[0] # x padding\n coords[:, [1, 3]] -= pad[1] # y padding\n coords[:, :4] /= gain\n clip_coords(coords, img0_shape)\n return coords" }, { "identifier": "set_logging", "path": "utils/general.py", "snippet": "def set_logging(rank=-1, verbose=True):\n logging.basicConfig(\n format=\"%(message)s\",\n level=logging.INFO if (verbose and rank in [-1, 0]) else logging.WARN)" }, { "identifier": "strip_optimizer", "path": "utils/general.py", "snippet": "def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()\n # Strip optimizer from 'f' to finalize training, optionally save as 's'\n x = torch.load(f, map_location=torch.device('cpu'))\n if x.get('ema'):\n x['model'] = x['ema'] # replace model with ema\n for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates': # keys\n x[k] = None\n x['epoch'] = -1\n x['model'].half() # to FP16\n for p in x['model'].parameters():\n p.requires_grad = False\n torch.save(x, s or f)\n mb = os.path.getsize(s or f) / 1E6 # filesize\n print(f\"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB\")" }, { "identifier": "xyxy2xywh", "path": "utils/general.py", "snippet": "def xyxy2xywh(x):\n # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center\n y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center\n y[:, 2] = x[:, 2] - x[:, 0] # width\n y[:, 3] = x[:, 3] - x[:, 1] # height\n return y" }, { "identifier": "Annotator", "path": "utils/plots.py", "snippet": "CONFIG_DIR = user_config_dir() # Ultralytics settings dir\nRANK = int(os.getenv('RANK', -1))\nclass Colors:\nclass Annotator:\n def __init__(self):\n def __call__(self, i, bgr=False):\n def hex2rgb(h): # rgb order (PIL)\ndef check_font(font='Arial.ttf', size=10):\n def __init__(self, im, line_width=None, font_size=None, font='', pil=False, example='abc'):\n def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):\n def rectangle(self, xy, fill=None, outline=None, width=1):\n def text(self, xy, text, txt_color=(255, 255, 255)):\n def result(self):\ndef hist2d(x, y, n=100):\ndef butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):\n def butter_lowpass(cutoff, fs, order):\ndef output_to_target(output):\ndef plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=1920, max_subplots=16):\ndef plot_lr_scheduler(optimizer, scheduler, 
epochs=300, save_dir=''):\ndef plot_val_txt(): # from utils.plots import *; plot_val()\ndef plot_targets_txt(): # from utils.plots import *; plot_targets_txt()\ndef plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study()\ndef plot_labels(labels, names=(), save_dir=Path('')):\ndef profile_idetection(start=0, stop=0, labels=(), save_dir=''):\ndef plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve()\ndef plot_results(file='path/to/results.csv', dir=''):\ndef feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):" }, { "identifier": "load_classifier", "path": "utils/torch_utils.py", "snippet": "def load_classifier(name='resnet101', n=2):\n # Loads a pretrained model reshaped to n-class output\n model = torchvision.models.__dict__[name](pretrained=True)\n\n # ResNet model properties\n # input_size = [3, 224, 224]\n # input_space = 'RGB'\n # input_range = [0, 1]\n # mean = [0.485, 0.456, 0.406]\n # std = [0.229, 0.224, 0.225]\n\n # Reshape output to n classes\n filters = model.fc.weight.shape[1]\n model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True)\n model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True)\n model.fc.out_features = n\n return model" }, { "identifier": "select_device", "path": "utils/torch_utils.py", "snippet": "def select_device(device='', batch_size=None):\n # device = 'cpu' or '0' or '0,1,2,3'\n s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string\n device = str(device).strip().lower().replace('cuda:', '') # to string, 'cuda:0' to '0'\n cpu = device == 'cpu'\n if cpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable\n assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability\n\n cuda = not cpu and torch.cuda.is_available()\n if cuda:\n devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7\n n = len(devices) # device count\n if n > 1 and batch_size: # check batch_size is divisible by device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * (len(s) + 1)\n for i, d in enumerate(devices):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\\n\" # bytes to MB\n else:\n s += 'CPU\\n'\n\n LOGGER.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe\n return torch.device('cuda:0' if cuda else 'cpu')" }, { "identifier": "time_sync", "path": "utils/torch_utils.py", "snippet": "def time_sync():\n # pytorch-accurate time\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n return time.time()" } ]
import argparse import os import sys import cv2 import numpy as np import torch import torch.backends.cudnn as cudnn import onnxruntime import tensorflow as tf from pathlib import Path from models.experimental import attempt_load from utils.datasets import LoadImages, LoadStreams from utils.general import apply_classifier, check_img_size, check_imshow, check_requirements, check_suffix, colorstr, \ increment_path, non_max_suppression, print_args, save_one_box, scale_coords, set_logging, \ strip_optimizer, xyxy2xywh from utils.plots import Annotator, colors from utils.torch_utils import load_classifier, select_device, time_sync
11,167
t2 = time_sync() dt[0] += t2 - t1 # Inference if pt: visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred = model(img, augment=augment, visualize=visualize)[0] elif onnx: if dnn: net.setInput(img) pred = torch.tensor(net.forward()) else: pred = torch.tensor(session.run([session.get_outputs()[0].name], {session.get_inputs()[0].name: img})) else: # tensorflow model (tflite, pb, saved_model) imn = img.permute(0, 2, 3, 1).cpu().numpy() # image in numpy if pb: pred = frozen_func(x=tf.constant(imn)).numpy() elif saved_model: pred = model(imn, training=False).numpy() elif tflite: if int8: scale, zero_point = input_details[0]['quantization'] imn = (imn / scale + zero_point).astype(np.uint8) # de-scale interpreter.set_tensor(input_details[0]['index'], imn) interpreter.invoke() pred = interpreter.get_tensor(output_details[0]['index']) if int8: scale, zero_point = output_details[0]['quantization'] pred = (pred.astype(np.float32) - zero_point) * scale # re-scale pred[..., 0] *= imgsz[1] # x pred[..., 1] *= imgsz[0] # y pred[..., 2] *= imgsz[1] # w pred[..., 3] *= imgsz[0] # h pred = torch.tensor(pred) t3 = time_sync() dt[1] += t3 - t2 # NMS pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) dt[2] += time_sync() - t3 # Second-stage classifier (optional) if classify: pred = apply_classifier(pred, modelc, img, im0s) # Process predictions for i, det in enumerate(pred): # per image seen += 1 if webcam: # batch_size >= 1 p, s, im0, frame = path[i], f'{i}: ', im0s[i].copy(), dataset.count else: p, s, im0, frame = path, '', im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # img.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt s += '%gx%g ' % img.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): # Rescale boxes from img_size to im0 size det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() # Print results for c in det[:, -1].unique(): n = (det[:, -1] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Write results for *xyxy, conf, cls in reversed(det): if save_txt: # Write to file xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(txt_path + '.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add bbox to image c = int(cls) # integer class # label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') label = None annotator.box_label(xyxy, label, color=colors(c, True)) if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) # Print time (inference-only) print(f'{s}Done. 
({t3 - t2:.3f}s)') # Stream results im0 = annotator.result() if view_img: cv2.imshow(str(p), im0) cv2.waitKey(1) # 1 millisecond # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path += '.mp4' vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print results t = tuple(x / seen * 1E3 for x in dt) # speeds per image print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' print(f"Results saved to {colorstr('bold', save_dir)}{s}") if update:
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Run inference on images, videos, directories, streams, etc. Usage: $ python path/to/detect.py --source path/to/img.jpg --weights yolov5s.pt --img 640 """ FILE = Path(__file__).resolve() ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative @torch.no_grad() def run(weights=ROOT / 'yolov5m.pt', # model.pt path(s) source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam imgsz=640, # inference size (pixels) conf_thres=0.25, # confidence threshold iou_thres=0.45, # NMS IOU threshold max_det=1000, # maximum detections per image device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu view_img=False, # show results save_txt=False, # save results to *.txt save_conf=False, # save confidences in --save-txt labels save_crop=False, # save cropped prediction boxes nosave=False, # do not save images/videos classes=None, # filter by class: --class 0, or --class 0 2 3 agnostic_nms=False, # class-agnostic NMS augment=False, # augmented inference visualize=False, # visualize features update=False, # update all models project=ROOT / 'runs/detect', # save results to project/name name='exp', # save results to project/name exist_ok=False, # existing project/name ok, do not increment line_thickness=3, # bounding box thickness (pixels) hide_labels=False, # hide labels hide_conf=False, # hide confidences half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference ): source = str(source) save_img = not nosave and not source.endswith('.txt') # save inference images webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( ('rtsp://', 'rtmp://', 'http://', 'https://')) # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Initialize set_logging() device = select_device(device) half &= device.type != 'cpu' # half precision only supported on CUDA # Load model w = str(weights[0] if isinstance(weights, list) else weights) classify, suffix, suffixes = False, Path(w).suffix.lower(), ['.pt', '.onnx', '.tflite', '.pb', ''] check_suffix(w, suffixes) # check weights have acceptable suffix pt, onnx, tflite, pb, saved_model = (suffix == x for x in suffixes) # backend booleans stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults if pt: model = torch.jit.load(w) if 'torchscript' in w else attempt_load(weights, map_location=device) stride = int(model.stride.max()) # model stride names = model.module.names if hasattr(model, 'module') else model.names # get class names if half: model.half() # to FP16 if classify: # second-stage classifier modelc = load_classifier(name='resnet50', n=2) # initialize modelc.load_state_dict(torch.load('resnet50.pt', map_location=device)['model']).to(device).eval() elif onnx: if dnn: # check_requirements(('opencv-python>=4.5.4',)) net = cv2.dnn.readNetFromONNX(w) else: check_requirements(('onnx', 'onnxruntime')) session = onnxruntime.InferenceSession(w, None) else: # TensorFlow models check_requirements(('tensorflow>=2.4.1',)) if pb: # https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt def wrap_frozen_graph(gd, inputs, outputs): x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped import return x.prune(tf.nest.map_structure(x.graph.as_graph_element, inputs), 
tf.nest.map_structure(x.graph.as_graph_element, outputs)) graph_def = tf.Graph().as_graph_def() graph_def.ParseFromString(open(w, 'rb').read()) frozen_func = wrap_frozen_graph(gd=graph_def, inputs="x:0", outputs="Identity:0") elif saved_model: model = tf.keras.models.load_model(w) elif tflite: interpreter = tf.lite.Interpreter(model_path=w) # load TFLite model interpreter.allocate_tensors() # allocate input_details = interpreter.get_input_details() # inputs output_details = interpreter.get_output_details() # outputs int8 = input_details[0]['dtype'] == np.uint8 # is TFLite quantized uint8 model imgsz = check_img_size(imgsz, s=stride) # check image size # Dataloader if webcam: view_img = check_imshow() cudnn.benchmark = True # set True to speed up constant image size inference dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt) bs = len(dataset) # batch_size else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt) bs = 1 # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference if pt and device.type != 'cpu': model(torch.zeros(1, 3, *imgsz).to(device).type_as(next(model.parameters()))) # run once dt, seen = [0.0, 0.0, 0.0], 0 for path, img, im0s, vid_cap in dataset: t1 = time_sync() if onnx: img = img.astype('float32') else: img = torch.from_numpy(img).to(device) img = img.half() if half else img.float() # uint8 to fp16/32 img = img / 255.0 # 0 - 255 to 0.0 - 1.0 if len(img.shape) == 3: img = img[None] # expand for batch dim t2 = time_sync() dt[0] += t2 - t1 # Inference if pt: visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred = model(img, augment=augment, visualize=visualize)[0] elif onnx: if dnn: net.setInput(img) pred = torch.tensor(net.forward()) else: pred = torch.tensor(session.run([session.get_outputs()[0].name], {session.get_inputs()[0].name: img})) else: # tensorflow model (tflite, pb, saved_model) imn = img.permute(0, 2, 3, 1).cpu().numpy() # image in numpy if pb: pred = frozen_func(x=tf.constant(imn)).numpy() elif saved_model: pred = model(imn, training=False).numpy() elif tflite: if int8: scale, zero_point = input_details[0]['quantization'] imn = (imn / scale + zero_point).astype(np.uint8) # de-scale interpreter.set_tensor(input_details[0]['index'], imn) interpreter.invoke() pred = interpreter.get_tensor(output_details[0]['index']) if int8: scale, zero_point = output_details[0]['quantization'] pred = (pred.astype(np.float32) - zero_point) * scale # re-scale pred[..., 0] *= imgsz[1] # x pred[..., 1] *= imgsz[0] # y pred[..., 2] *= imgsz[1] # w pred[..., 3] *= imgsz[0] # h pred = torch.tensor(pred) t3 = time_sync() dt[1] += t3 - t2 # NMS pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) dt[2] += time_sync() - t3 # Second-stage classifier (optional) if classify: pred = apply_classifier(pred, modelc, img, im0s) # Process predictions for i, det in enumerate(pred): # per image seen += 1 if webcam: # batch_size >= 1 p, s, im0, frame = path[i], f'{i}: ', im0s[i].copy(), dataset.count else: p, s, im0, frame = path, '', im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # img.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt s += '%gx%g ' % img.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, 
line_width=line_thickness, example=str(names)) if len(det): # Rescale boxes from img_size to im0 size det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() # Print results for c in det[:, -1].unique(): n = (det[:, -1] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Write results for *xyxy, conf, cls in reversed(det): if save_txt: # Write to file xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(txt_path + '.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add bbox to image c = int(cls) # integer class # label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') label = None annotator.box_label(xyxy, label, color=colors(c, True)) if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) # Print time (inference-only) print(f'{s}Done. ({t3 - t2:.3f}s)') # Stream results im0 = annotator.result() if view_img: cv2.imshow(str(p), im0) cv2.waitKey(1) # 1 millisecond # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path += '.mp4' vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print results t = tuple(x / seen * 1E3 for x in dt) # speeds per image print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' print(f"Results saved to {colorstr('bold', save_dir)}{s}") if update:
next_line: strip_optimizer(weights)  # update model (to fix SourceChangeWarning)
gold_snippet_index: 15
created_at: 2023-10-12 08:52:01+00:00
level: 16k
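An illustrative sketch of the per-frame preprocessing that the run() loop in the row above performs before inference (uint8 image to normalized float tensor with a batch dimension). The helper name `preprocess` is ours, and it assumes the image has already been letterboxed to the network input size and transposed to CHW, as the YOLOv5 dataloaders do:

import numpy as np
import torch

def preprocess(img_chw: np.ndarray, half: bool = False) -> torch.Tensor:
    # uint8 (C, H, W) -> float tensor in [0, 1] with a leading batch dim,
    # mirroring the `img / 255.0` and `img[None]` steps in run()
    t = torch.from_numpy(img_chw)
    t = t.half() if half else t.float()
    t = t / 255.0
    if t.ndim == 3:
        t = t[None]  # expand for batch dimension
    return t

# usage sketch
dummy = np.random.randint(0, 256, size=(3, 640, 640), dtype=np.uint8)
batch = preprocess(dummy)  # shape (1, 3, 640, 640)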
repo_name: OmicsML/scDiff
file_path: scdiff/model.py
[ { "identifier": "Decoder", "path": "scdiff/modules/diffusion_model/decoder.py", "snippet": "class Decoder(nn.Module):\n def __init__(self, dim, out_dim, dropout=0., norm_type=\"layernorm\", num_layers=1, cond_num_dict=None,\n cond_emb_dim=None, cond_mask_ratio=0., act=\"gelu\", out_act=None):\n super().__init__()\n if isinstance(act, str) or act is None:\n act = create_activation(act)\n if isinstance(out_act, str) or out_act is None:\n out_act = create_activation(out_act)\n\n self.cond_num_dict = cond_num_dict\n if self.cond_num_dict is not None:\n cond_emb_dim = cond_emb_dim if cond_emb_dim is not None else dim\n self.cond_embed = EmbeddingDict(cond_num_dict, cond_emb_dim, 1, 1, None, mask_ratio=cond_mask_ratio)\n else:\n self.cond_embed = None\n\n self.layers = nn.ModuleList() # FIX: use MLP layer\n for _ in range(num_layers - 1):\n self.layers.append(nn.Sequential(\n nn.Linear(dim, dim),\n act,\n create_norm(norm_type, dim),\n nn.Dropout(dropout),\n ))\n self.layers.append(nn.Sequential(nn.Linear(dim, out_dim), out_act))\n\n def forward(self, x, conditions=None):\n if self.cond_embed is not None:\n cond_emb = self.cond_embed(conditions)[0]\n x = x + cond_emb.squeeze(1)\n\n for layer in self.layers:\n x = layer(x)\n\n return x" }, { "identifier": "Embedder", "path": "scdiff/modules/diffusion_model/embedder.py", "snippet": "class Embedder(nn.Module):\n def __init__(self, pretrained_gene_list, num_hidden, norm, activation='gelu', dropout=0.,\n gene_emb=None, fix_embedding=False):\n super().__init__()\n\n self.pretrained_gene_list = pretrained_gene_list\n self.gene_index = {j: i for i, j in enumerate(pretrained_gene_list)}\n\n if gene_emb is not None:\n self.emb = nn.Parameter(gene_emb, requires_grad=not fix_embedding)\n else:\n num_genes = len(pretrained_gene_list)\n self.emb = nn.Parameter(torch.randn([num_genes, num_hidden], dtype=torch.float32) * 0.005)\n\n if fix_embedding:\n self.emb.requires_grad = False\n\n self.post_layer = nn.Sequential(\n create_activation(activation),\n create_norm(norm, num_hidden),\n nn.Dropout(dropout),\n )\n\n def forward(self, x, pe_input=None, input_gene_list=None, input_gene_idx=None):\n assert pe_input is None # FIX: deprecate pe_input\n\n if input_gene_idx is not None:\n gene_idx = input_gene_idx\n elif input_gene_list is not None:\n gene_idx = torch.tensor([self.gene_index[o] for o in input_gene_list if o in self.gene_index]).long()\n else:\n if x.shape[1] != len(self.pretrained_gene_list):\n raise ValueError(\n 'The input gene size is not the same as the pretrained gene list. 
'\n 'Please provide the input gene list.',\n )\n gene_idx = torch.arange(x.shape[1]).long()\n gene_idx = gene_idx.to(x.device)\n\n feat = F.embedding(gene_idx, self.emb)\n out = torch.sparse.mm(x, feat)\n out = self.post_layer(out)\n\n return out, gene_idx" }, { "identifier": "Encoder", "path": "scdiff/modules/diffusion_model/encoder.py", "snippet": "class Encoder(nn.Module):\n\n def __init__(\n self,\n depth,\n dim,\n num_heads,\n dim_head,\n *,\n dropout=0.,\n cond_type='crossattn',\n cond_cat_input=False,\n ):\n super().__init__()\n\n self.cond_cat_input = cond_cat_input\n\n if cond_type == 'crossattn':\n self.blocks = nn.ModuleList([\n BasicTransformerBlock(dim, num_heads, dim_head, self_attn=False, cross_attn=True, context_dim=dim,\n qkv_bias=True, dropout=dropout, final_act=None)\n for _ in range(depth)])\n elif cond_type == 'mlp':\n self.blocks = nn.ModuleList([\n ConditionEncoderWrapper(nn.Sequential(\n nn.Linear(dim, dim),\n \"gelu\",\n create_norm(\"layernorm\", dim),\n nn.Dropout(dropout),\n )) for _ in range(depth)])\n elif cond_type == 'stackffn':\n self.blocks = nn.ModuleList([\n ConditionEncoderWrapper(\n FeedForward(dim, mult=4, glu=False, dropout=dropout)\n ) for _ in range(depth)])\n else:\n raise ValueError(f'Unknown conditioning type {cond_type!r}')\n\n def forward(self, x, context_list, cond_emb_list):\n # XXX: combine context_list and cond_emb_list in conditioner?..\n x = x.unsqueeze(1)\n\n stack = zip(self.blocks, reversed(context_list), reversed(cond_emb_list))\n for i, (blk, ctxt, cond_emb) in enumerate(stack):\n full_cond_emb_list = list(filter(lambda x: x is not None, (ctxt, cond_emb)))\n if self.cond_cat_input:\n full_cond_emb_list.append(x)\n full_cond_emb = torch.cat(full_cond_emb_list, dim=1) if full_cond_emb_list else None\n\n x = blk(x, context=full_cond_emb)\n\n return x.squeeze(1)" }, { "identifier": "denoising_eval", "path": "scdiff/evaluate.py", "snippet": "@torch.inference_mode()\ndef denoising_eval(true: TensorArray, pred: TensorArray, mask: TensorArray):\n true = as_tensor(true, assert_type=True)\n pred = as_tensor(pred, assert_type=True)\n mask = as_tensor(mask, assert_type=True).bool()\n\n rmse_normed = masked_rmse(pred, true, mask).item()\n corr_normed = masked_corr(pred, true, mask).item()\n global_corr_normed = PearsonCorr1d(pred[mask], true[mask]).item()\n\n # nonzero_masked = (true > 0) * mask\n # rmse_normed_nonzeros = masked_rmse(pred, true, nonzero_masked).item()\n # corr_normed_nonzeros = masked_corr(pred, true, nonzero_masked).item()\n\n corr_normed_all = PearsonCorr(pred, true).item()\n rmse_normed_all = F.mse_loss(pred, true).sqrt().item()\n\n r = scipy.stats.linregress(pred[mask].cpu().numpy(), true[mask].cpu().numpy())[2]\n # r_all = scipy.stats.linregress(pred.ravel().cpu().numpy(), true.ravel().cpu().numpy())[2]\n\n return {\n 'denoise_rmse_normed': rmse_normed,\n 'denoise_corr_normed': corr_normed,\n 'denoise_global_corr_normed': global_corr_normed,\n 'denoise_global_r2_normed': r ** 2,\n # 'denoise_rmse_normed_nonzeros': rmse_normed_nonzeros,\n # 'denoise_corr_normed_nonzeros': corr_normed_nonzeros,\n 'denoise_rmse_normed_all': rmse_normed_all,\n 'denoise_corr_normed_all': corr_normed_all,\n # 'denoise_global_r2_normed_all': r_all ** 2,\n }" }, { "identifier": "evaluate_annotation", "path": "scdiff/evaluate.py", "snippet": "@torch.inference_mode()\ndef evaluate_annotation(\n true: TensorArray,\n pred: TensorArray,\n name: Optional[str],\n) -> Dict[str, float]:\n true_array = as_array(true, assert_type=True)\n pred_array = 
as_array(pred, assert_type=True)\n\n le = LabelEncoder()\n le.classes_ = np.array(sorted(set(np.unique(true_array).tolist() + np.unique(pred_array).tolist())))\n\n true = torch.LongTensor(le.transform(true_array))\n pred = torch.LongTensor(le.transform(pred_array))\n\n num_classes = le.classes_.size\n # num_classes = int(max(true.max(), pred.max())) + 1\n # num_unique_classes = max(true.unique().numel(), pred.unique().numel())\n # if (num_classes == num_unique_classes + 1) and (0 not in true):\n # warnings.warn(\n # \"Implicitly removing null label (index 0)\",\n # UserWarning,\n # stacklevel=2,\n # )\n # true, pred, num_classes = true - 1, pred - 1, num_classes - 1\n # elif num_classes != num_unique_classes:\n # warnings.warn(\n # f\"Number of unique classes {num_unique_classes} mismatch the \"\n # f\"number of classes inferred by max index {num_classes}\",\n # UserWarning,\n # stacklevel=2,\n # )\n\n suffix = \"\" if name is None else f\"_{name}\"\n\n out = {}\n out[f\"acc{suffix}\"] = multiclass_accuracy(true, pred, num_classes).item()\n out[f\"f1{suffix}\"] = multiclass_f1_score(true, pred, num_classes).item()\n out[f\"precision{suffix}\"] = multiclass_precision(true, pred, num_classes).item()\n out[f\"recall{suffix}\"] = multiclass_recall(true, pred, num_classes).item()\n\n return out" }, { "identifier": "perturbation_eval", "path": "scdiff/evaluate.py", "snippet": "@torch.inference_mode()\ndef perturbation_eval(\n true,\n pred,\n control,\n true_conds=None,\n gene_names=None,\n path_to_save=None,\n de_gene_idx_dict=None,\n ndde20_idx_dict=None,\n de_gene_idx=None,\n ndde20_idx=None,\n):\n if true_conds is not None: # summarize condition wise evaluation\n assert de_gene_idx_dict is not None, \"GEARS eval require DE gene index dict\"\n assert ndde20_idx_dict is not None, \"GEARS eval require top20 none dropout DE gene index dict\"\n if path_to_save:\n warnings.warn(\n f\"Cant save with multiple conds, got {path_to_save=}. 
Ignoring save option\",\n UserWarning,\n stacklevel=2,\n )\n unique_true_conds = true_conds.unique(dim=0)\n score_dict_list = []\n for cond in unique_true_conds:\n cond_ind = (true_conds == cond).all(1)\n true_sub, pred_sub = true[cond_ind], pred[cond_ind]\n cond_idx_tuple = tuple(i for i in cond.tolist() if i != -1) # XXX: specificially designed for GEARS\n score_dict_list.append(perturbation_eval(true_sub, pred_sub, control, gene_names=gene_names,\n de_gene_idx=de_gene_idx_dict[cond_idx_tuple],\n ndde20_idx=ndde20_idx_dict[cond_idx_tuple]))\n scores = reduce_score_dict_list(score_dict_list)\n return scores\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n\n adata_pred = ad.AnnData(pred.detach().cpu().numpy(),\n obs={'condition': [\"pred\"] * len(pred)})\n adata_true = ad.AnnData(true.detach().cpu().numpy(),\n obs={'condition': [\"stim\"] * len(true)})\n adata_ctrl = ad.AnnData(control.detach().cpu().numpy(),\n obs={'condition': [\"ctrl\"] * len(control)})\n adata = ad.concat([adata_true, adata_ctrl])\n if gene_names is not None:\n adata.var.index = gene_names\n adata_pred.var.index = gene_names\n sc.tl.rank_genes_groups(adata, groupby='condition', method=\"wilcoxon\")\n diff_genes = adata.uns[\"rank_genes_groups\"][\"names\"]['stim']\n diff_genes_idx = [np.where(np.array(gene_names) == x)[0].item() for x in diff_genes]\n adata = ad.concat([adata, adata_pred])\n adata.obs_names_make_unique()\n scores = reg_mean_plot(\n adata,\n condition_key='condition',\n axis_keys={\"x\": \"pred\", \"y\": 'stim', \"x1\": \"ctrl\"},\n gene_list=diff_genes[:10] if gene_names is not None else None,\n top_100_genes=diff_genes[:100],\n labels={\"x\": \"predicted\", \"y\": \"ground truth\", \"x1\": \"ctrl\"},\n path_to_save=path_to_save,\n title='scDiff',\n show=False,\n legend=False,\n )\n\n true_mean = true.mean(0)\n pred_mean = pred.mean(0)\n control_mean = control.mean(0)\n true_delta_mean = true_mean - control_mean\n pred_delta_mean = pred_mean - control_mean\n\n scores.update({\n # MAE\n 'mae': (pred_mean - true_mean).abs().mean().item(),\n 'mae_top_100': (pred_mean[diff_genes_idx[:100]] - true_mean[diff_genes_idx[:100]]).abs().mean().item(),\n 'mae_delta': (pred_delta_mean - true_delta_mean).abs().mean().item(),\n # MSE\n 'mse': F.mse_loss(pred_mean, true_mean).item(),\n 'mse_top_100': F.mse_loss(pred_mean[diff_genes_idx[:100]], true_mean[diff_genes_idx[:100]]).item(),\n 'mse_delta': F.mse_loss(pred_delta_mean, true_delta_mean).item(),\n # RMSE\n 'rmse': np.sqrt(F.mse_loss(pred_mean, true_mean).item()),\n 'rmse_top_100': np.sqrt(F.mse_loss(pred_mean[diff_genes_idx[:100]],\n true_mean[diff_genes_idx[:100]]).item()),\n 'rmse_delta': np.sqrt(F.mse_loss(pred_delta_mean, true_delta_mean).item()),\n # Correlation\n 'corr': PearsonCorr1d(pred_mean, true_mean).item(),\n 'corr_top_100': PearsonCorr1d(pred_mean[diff_genes_idx[:100]],\n true_mean[diff_genes_idx[:100]]).item(),\n 'corr_delta': PearsonCorr1d(pred_delta_mean, true_delta_mean).item(),\n # # Cosine similarity\n # 'cos': F.cosine_similarity(pred_mean.unsqueeze(0), true_mean.unsqueeze(0))[0].item(),\n # 'cos_top_100': F.cosine_similarity(pred_mean[diff_genes_idx[:100]].unsqueeze(0),\n # true_mean[diff_genes_idx[:100]].unsqueeze(0))[0].item(),\n # 'cos_delta': F.cosine_similarity(pred_delta_mean.unsqueeze(0),\n # true_delta_mean.unsqueeze(0))[0].item(),\n })\n\n if de_gene_idx is not None:\n for num_de in (20, 50, 100, 200):\n if num_de > len(de_gene_idx):\n warnings.warn(\n f\"Skipping {num_de} DE gene num eval since max num DE 
available is {len(de_gene_idx)}\",\n UserWarning,\n stacklevel=2,\n )\n continue\n if num_de > true.shape[1]:\n warnings.warn(\n f\"Skipping {num_de} DE gene num eval since max num genes available is {true.shape[1]}\",\n UserWarning,\n stacklevel=2,\n )\n continue\n\n idx = de_gene_idx[:num_de]\n scores.update(de_eval(pred_mean[idx], true_mean[idx], control_mean[idx], f\"de{num_de}\"))\n\n if ndde20_idx is not None:\n scores.update(de_eval(pred_mean[ndde20_idx], true_mean[ndde20_idx], control_mean[ndde20_idx], \"ndde20\"))\n\n return scores" }, { "identifier": "calculate_batch_r_squared", "path": "scdiff/evaluate.py", "snippet": "def calculate_batch_r_squared(pred, true, conditions):\n conditions = dict_of_arrays_to_tensor(conditions)\n unique_cond = conditions.unique(dim=0)\n r_squared_list = []\n for i in range(len(unique_cond)):\n cond_flag = torch.all((conditions == unique_cond[i]), dim=1)\n x = pred[cond_flag].mean(0).numpy()\n y = true[cond_flag].mean(0).numpy()\n _, _, r_value, _, _ = scipy.stats.linregress(x, y)\n r_squared_list.append(r_value ** 2)\n return r_squared_list" }, { "identifier": "LitEma", "path": "scdiff/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates\n else torch.tensor(-1,dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n #remove as '.'-character is not allowed in buffers\n s_name = name.replace('.','')\n self.m_name2s_name.update({name:s_name})\n self.register_buffer(s_name,p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self,model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. 
After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "BasicTransformerBlock", "path": "scdiff/modules/layers/attention.py", "snippet": "class BasicTransformerBlock(nn.Module):\n def __init__(\n self,\n dim: int,\n n_heads: int,\n d_head: int = 64,\n self_attn: bool = True,\n cross_attn: bool = False,\n ts_cross_attn: bool = False,\n final_act: Optional[nn.Module] = None,\n dropout: float = 0.,\n context_dim: Optional[int] = None,\n gated_ff: bool = True,\n checkpoint: bool = False,\n qkv_bias: bool = False,\n linear_attn: bool = False,\n ):\n super().__init__()\n assert self_attn or cross_attn, 'At least on attention layer'\n self.self_attn = self_attn\n self.cross_attn = cross_attn\n self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff)\n if ts_cross_attn:\n raise NotImplementedError(\"Deprecated, please remove.\") # FIX: remove ts_cross_attn option\n # assert not (self_attn or linear_attn)\n # attn_cls = TokenSpecificCrossAttention\n else:\n assert not linear_attn, \"Performer attention not setup yet.\" # FIX: remove linear_attn option\n attn_cls = CrossAttention\n if self.cross_attn:\n self.attn1 = attn_cls(\n query_dim=dim,\n context_dim=context_dim,\n heads=n_heads,\n dim_head=d_head,\n dropout=dropout,\n qkv_bias=qkv_bias,\n ) # is self-attn if context is none\n if self.self_attn:\n self.attn2 = attn_cls(\n query_dim=dim,\n heads=n_heads,\n dim_head=d_head,\n dropout=dropout,\n qkv_bias=qkv_bias,\n ) # is a self-attention\n self.norm1 = nn.LayerNorm(dim)\n self.norm2 = nn.LayerNorm(dim)\n self.norm3 = nn.LayerNorm(dim)\n self.act = final_act\n self.checkpoint = checkpoint\n assert not self.checkpoint, 'Checkpointing not available yet' # FIX: remove checkpiont option\n\n @BatchedOperation(batch_dim=0, plain_num_dim=2)\n def forward(self, x, context=None, cross_mask=None, self_mask=None, **kwargs):\n if self.cross_attn:\n x = self.attn1(self.norm1(x), context=context, mask=cross_mask, **kwargs) + x\n if self.self_attn:\n x = self.attn2(self.norm2(x), mask=self_mask, **kwargs) + x\n x = self.ff(self.norm3(x)) + x\n if self.act is not None:\n x = self.act(x)\n return x" }, { "identifier": "FeedForward", "path": "scdiff/modules/layers/basic.py", "snippet": "class FeedForward(nn.Module):\n def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):\n super().__init__()\n inner_dim = int(dim * mult)\n dim_out = default(dim_out, dim)\n project_in = nn.Sequential(\n nn.Linear(dim, inner_dim),\n nn.GELU()\n ) if not glu else GEGLU(dim, inner_dim)\n\n self.net = nn.Sequential(\n project_in,\n nn.Dropout(dropout),\n nn.Linear(inner_dim, dim_out)\n )\n\n def forward(self, x):\n return self.net(x)" }, { "identifier": "EmbeddingDict", "path": "scdiff/modules/layers/scmodel.py", "snippet": "class EmbeddingDict(nn.Module):\n TEXT_EMB_DIR = './data/ontology_resources'\n\n def __init__(self, num_embed_dict, embedding_dim, depth, embedding_tokens=1,\n norm_layer=None, freeze=False, mask_ratio=0.0, text_emb=None,\n text_emb_file=None, freeze_text_emb=True, text_proj_type='linear',\n stackfnn_glu_flag=False, text_proj_hidden_dim=512, text_proj_act=None,\n text_proj_num_layers=2, text_proj_norm=None, text_proj_dropout=0.,\n gears_flag=False, gears_mode=\"single\", num_perts=None, gears_hidden_size=64,\n gears_mlp_layers=2, 
gears_norm=None, num_go_gnn_layers=1):\n super().__init__()\n size = embedding_dim * embedding_tokens\n n = embedding_tokens\n d = embedding_dim\n\n self.keys = sorted(num_embed_dict) # ensure consistent ordering\n self.mask_ratio = mask_ratio\n\n self.emb_dict = nn.ModuleDict()\n for key in self.keys:\n self.emb_dict[key] = nn.ModuleList([\n nn.Sequential(\n nn.Embedding(\n num_embed_dict[key],\n size,\n _freeze=freeze,\n ),\n create_norm(norm_layer, size),\n Rearrange('b (n d) -> b n d', n=n, d=d),\n )\n for _ in range(depth)\n ])\n\n if text_emb is not None or text_emb_file is not None:\n if text_emb is None:\n text_emb = torch.load(f'{self.TEXT_EMB_DIR}/{text_emb_file}')\n if text_proj_type == 'linear':\n text_proj = nn.Linear(text_emb.shape[1], size)\n elif text_proj_type == 'stackffn':\n text_proj = FeedForward(text_emb.shape[1], dim_out=size, mult=4, glu=stackfnn_glu_flag)\n elif text_proj_type == 'mlp':\n text_proj = MLPLayers(text_emb.shape[1], size, text_proj_hidden_dim, text_proj_num_layers,\n text_proj_dropout, text_proj_norm, text_proj_act)\n else:\n raise NotImplementedError(f\"Unsupported text_proj_type {text_proj_type}\")\n\n text_act = create_activation(text_proj_act)\n if text_proj_norm is None and norm_layer is not None:\n text_norm = create_norm(norm_layer, size)\n else:\n text_norm = create_norm(text_proj_norm, size)\n self.keys.append(\"text\")\n self.emb_dict['text'] = nn.ModuleList([\n nn.Sequential(\n nn.Embedding.from_pretrained(text_emb, freeze=freeze_text_emb),\n text_proj,\n text_norm,\n text_act,\n Rearrange('b (n d) -> b n d', n=n, d=d),\n )\n for _ in range(depth)\n ])\n\n if num_perts is not None and gears_flag:\n self.keys.append('pert')\n self.gears_mode = gears_mode\n gears_kwargs = dict(num_perts=num_perts, out_dim=size, mode=gears_mode,\n hidden_size=gears_hidden_size, mlp_layers=gears_mlp_layers)\n if gears_mode == \"single\":\n self.emb_dict['pert'] = nn.ModuleList([\n nn.Sequential(\n GEARS_Conditioner(num_go_gnn_layers=num_go_gnn_layers, **gears_kwargs),\n create_norm(gears_norm, size),\n Rearrange('b (n d) -> b n d', n=n, d=d),\n )\n for _ in range(depth)\n ])\n else:\n self.emb_dict['pert'] = nn.ModuleList([\n GEARS_Conditioner(num_go_gnn_layers=depth, **gears_kwargs),\n nn.ModuleList([create_norm(gears_norm, size) for _ in range(depth)]),\n Rearrange('b (n d) -> b n d', n=n, d=d),\n ])\n\n def __iter__(self):\n yield from self.keys\n\n def __getitem__(self, key):\n return self.emb_dict[key]\n\n def forward(self, input: Dict[str, torch.Tensor], aug_graph=None) -> List[torch.Tensor]:\n # Outer list: condition types; inner list: layer depth\n out = []\n for key in self.keys:\n if self.training:\n # NOTE: NULL condition token added during dataset init, and is\n # set to be the first token (index zero).\n mask = torch.rand_like(input[key].float()) < self.mask_ratio\n masked_input = input[key].long()\n if key != 'text' and key != \"pert\":\n masked_input[mask] = 0\n else:\n masked_input = input[key].long()\n\n if (\n isinstance(self[key][0], GEARS_Conditioner) # single\n or isinstance(self[key][0][0], GEARS_Conditioner) # parallel | sequential\n ):\n emb_list = []\n if self.gears_mode == \"single\":\n for emb in self[key]:\n gears_out = emb[0](masked_input, aug_graph)\n emb_list.append(emb[1:](gears_out))\n else:\n gears_out = self[key][0](masked_input, aug_graph)\n stack = zip(gears_out, self[key][1], repeat(self[key][2]))\n for emb, norm, rearrange in stack:\n emb_list.append(rearrange(norm(emb)))\n else:\n emb_list = [emb(masked_input) for emb in 
self[key]]\n\n out.append(emb_list)\n\n # Consolidate by concatenating along the token dimention in each layer\n out = [torch.cat(embs, dim=1) for embs in zip(*out)]\n\n return out" }, { "identifier": "MaskedEncoderConditioner", "path": "scdiff/utils/diffusion.py", "snippet": "class MaskedEncoderConditioner(nn.Module):\n \"\"\"Use 2-layer MLP to encoder available feature number.\n\n The encoded feature number condition is added to the cell embddings. If\n disabled, then directly return the original cell embeddings.\n\n \"\"\"\n\n def __init__(\n self,\n dim: int,\n mult: int = 4,\n use_ratio: bool = False,\n use_se: bool = False,\n use_semlp: bool = False,\n concat: bool = False,\n disable: bool = False,\n ):\n super().__init__()\n assert not (use_ratio and use_se), \"Cannot set use_se and use_ratio together\"\n assert not (use_se and use_semlp), \"Cannot set use_se and use_semlp together\"\n assert not (use_se and concat), \"Cannot set use_se and concat together\"\n self.dim = dim\n self.use_ratio = use_ratio\n self.use_se = use_se or use_semlp\n self.concat = concat\n self.disable = disable\n if not disable:\n dim_in = dim if self.use_se else 1\n dim_in = dim_in + dim if concat else dim_in\n dim_hid = dim * mult\n\n self.proj = nn.Sequential(\n nn.Linear(dim_in, dim_hid),\n nn.SiLU(),\n nn.Linear(dim_hid, dim),\n ) if not use_se else nn.Identity()\n\n def forward(self, x: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:\n if not self.disable and mask is not None:\n # Count the number of denoising input featues\n size = (mask.bool()).sum(1, keepdim=True).float()\n\n if self.use_ratio:\n h = size / x.shape[1]\n elif self.use_se:\n h = sinusoidal_embedding(size.ravel(), dim=self.dim, max_period=x.shape[1] + 1)\n else:\n h = size\n\n if self.concat:\n h = torch.cat((x, h), dim=-1)\n x = self.proj(h)\n else:\n h = self.proj(h)\n x = x + h\n\n return x" }, { "identifier": "timestep_embedding", "path": "scdiff/utils/diffusion.py", "snippet": "def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n if not repeat_only:\n embedding = sinusoidal_embedding(timesteps, dim, max_period)\n else:\n embedding = repeat(timesteps, 'b -> b d', d=dim)\n return embedding" }, { "identifier": "make_beta_schedule", "path": "scdiff/utils/diffusion.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": 
"as_1d_vec", "path": "scdiff/utils/misc.py", "snippet": "def as_1d_vec(x: torch.Tensor) -> torch.Tensor:\n if len(x.shape) == 1:\n x = x.unsqueeze(-1)\n elif len(x.shape) == 1:\n raise ValueError(f\"input must be one or two dimensional tensor, got {x.shape}\")\n return x" }, { "identifier": "exists", "path": "scdiff/utils/misc.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "count_params", "path": "scdiff/utils/misc.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "scdiff/utils/misc.py", "snippet": "def instantiate_from_config(\n config: Union[Dict, DictConfig, str],\n _target_key: str = \"target\",\n _params_key: str = \"params\",\n _catch_conflict: bool = True,\n **extra_kwargs: Any,\n):\n # Check target specificiation and handel special conditions\n if _target_key not in config:\n if config == \"__is_first_stage__\":\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(f\"Expected key `{_target_key}` to instantiate.\")\n\n # Obtain target object and kwargs\n cls = get_obj_from_str(config[\"target\"])\n kwargs = config.get(_params_key, dict())\n\n # Check conflict and merge kwargs\n if (common_keys := sorted(set(kwargs) & set(extra_kwargs))):\n diff_keys = []\n for key in common_keys:\n if kwargs[key] != extra_kwargs[key]:\n diff_keys.append(key)\n\n if diff_keys and _catch_conflict:\n conflicting_config_kwargs = {i: kwargs[i] for i in diff_keys}\n conflicting_extra_kwargs = {i: extra_kwargs[i] for i in diff_keys}\n raise ValueError(\n \"Conflicting parameters between configs and those that are \"\n \"additionally specified. 
Please resolve or set _catch_conflict \"\n f\"to False to bypass this issue.\\n{conflicting_config_kwargs=}\\n\"\n f\"{conflicting_extra_kwargs=}\\n\",\n )\n kwargs = {**kwargs, **extra_kwargs}\n\n # Instantiate object and handel exception during instantiation\n try:\n return cls(**kwargs)\n except Exception as e:\n raise RuntimeError(f\"Failed to instantiate {cls!r} with kwargs:\\n{pformat(kwargs)}\") from e" }, { "identifier": "default", "path": "scdiff/utils/misc.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "create_activation", "path": "scdiff/utils/modules.py", "snippet": "def create_activation(name):\n if name is None:\n return nn.Identity()\n elif name == \"relu\":\n return nn.ReLU()\n elif name == \"gelu\":\n return nn.GELU()\n elif name == \"glu\":\n return nn.GLU()\n elif name == \"sigmoid\":\n return nn.Sigmoid()\n elif name == \"prelu\":\n return nn.PReLU()\n elif name == \"elu\":\n return nn.ELU()\n else:\n raise NotImplementedError(f\"{name} is not implemented.\")" }, { "identifier": "create_norm", "path": "scdiff/utils/modules.py", "snippet": "def create_norm(name, n, h=16):\n if name is None:\n return nn.Identity()\n elif name == \"layernorm\":\n return nn.LayerNorm(n)\n elif name == \"batchnorm\":\n return nn.BatchNorm1d(n)\n elif name == \"groupnorm\":\n return nn.GroupNorm(h, n)\n elif name.startswith(\"groupnorm\"):\n inferred_num_groups = int(name.repalce(\"groupnorm\", \"\"))\n return nn.GroupNorm(inferred_num_groups, n)\n else:\n raise NotImplementedError(f\"{name} is not implemented.\")" }, { "identifier": "extract_into_tensor", "path": "scdiff/utils/modules.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "init_weights", "path": "scdiff/utils/modules.py", "snippet": "def init_weights(m):\n if isinstance(m, nn.Linear):\n torch.nn.init.xavier_uniform_(m.weight)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, (nn.LayerNorm, nn.GroupNorm, nn.BatchNorm1d)):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)" }, { "identifier": "mean_flat", "path": "scdiff/utils/modules.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "noise_like", "path": "scdiff/utils/modules.py", "snippet": "def noise_like(shape, device, repeat=False):\n if repeat:\n noise = torch.randn((1, *shape[1:]), device=device)\n repeat_noise = noise.repeat(shape[0], *((1,) * (len(shape) - 1)))\n return repeat_noise\n else:\n return torch.randn(shape, device=device)" } ]
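The make_beta_schedule snippet in the context above defines, for the "linear" case, betas as the element-wise square of a linspace between sqrt(linear_start) and sqrt(linear_end). Below is a minimal standalone sketch of that schedule; the derived cumulative-alpha quantities are standard DDPM bookkeeping added for illustration, not copied from this file:

import numpy as np
import torch

def linear_beta_schedule(n_timestep: int, linear_start: float = 1e-4, linear_end: float = 2e-2) -> np.ndarray:
    # beta_t = (sqrt(start) + t/(T-1) * (sqrt(end) - sqrt(start)))**2, as in the "linear" branch above
    betas = torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
    return betas.numpy()

betas = linear_beta_schedule(1000)
alphas = 1.0 - betas
alphas_cumprod = np.cumprod(alphas)                 # \bar{alpha}_t, used to sample q(x_t | x_0)
sqrt_alphas_cumprod = np.sqrt(alphas_cumprod)
sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - alphas_cumprod)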
import_statement:
import warnings
import anndata as ad
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl
from contextlib import contextmanager
from functools import partial
from einops.layers.torch import Rearrange
from scipy.sparse import csr_matrix
from torch.optim.lr_scheduler import LambdaLR
from tqdm import tqdm
from scdiff.modules.diffusion_model import Decoder, Embedder, Encoder
from scdiff.evaluate import (
    denoising_eval,
    evaluate_annotation,
    perturbation_eval,
    calculate_batch_r_squared,
)
from scdiff.modules.ema import LitEma
from scdiff.modules.layers.attention import BasicTransformerBlock
from scdiff.modules.layers.basic import FeedForward
from scdiff.modules.layers.scmodel import EmbeddingDict
from scdiff.utils.diffusion import MaskedEncoderConditioner, timestep_embedding
from scdiff.utils.diffusion import make_beta_schedule
from scdiff.utils.misc import as_1d_vec, exists, count_params, instantiate_from_config
from scdiff.utils.misc import default
from scdiff.utils.modules import create_activation, create_norm
from scdiff.utils.modules import extract_into_tensor, init_weights, mean_flat, noise_like
token_num: 12,115
raise ValueError(f"Unknwon condition embedder type {cond_emb_type}") else: self.cond_embed = None self.encoder = Encoder(depth, decoder_embed_dim, decoder_num_heads, decoder_dim_head, dropout=dropout, cond_type=cond_type, cond_cat_input=cond_cat_input) # self.mask_token = nn.Parameter(torch.zeros(1, decoder_embed_dim)) self.decoder_embed_type = decoder_embed_type assert decoder_embed_type in ['linear', 'embedder', 'encoder'] if decoder_embed_type == 'linear': self.decoder_embed = nn.Linear(self.in_dim, decoder_embed_dim) elif decoder_embed_type == 'embedder': self.decoder_embed = Embedder(pretrained_gene_list, decoder_embed_dim, 'layernorm', dropout=dropout) elif decoder_embed_type == 'encoder': self.decoder_embed = self.embedder self.mask_decoder_conditioner = MaskedEncoderConditioner( decoder_embed_dim, mult=4, use_ratio=mask_dec_cond_ratio, use_se=mask_dec_cond_se, use_semlp=mask_dec_cond_semlp, concat=mask_dec_cond_concat, disable=not mask_dec_cond) self.decoder_norm = create_norm(norm_layer, decoder_embed_dim) self.decoder = Decoder(decoder_embed_dim, self.in_dim, dropout, post_cond_norm, post_cond_layers, post_cond_num_dict, act=activation, cond_emb_dim=decoder_embed_dim, cond_mask_ratio=post_cond_mask_ratio) # -------------------------------------------------------------------------- self.initialize_weights() def initialize_weights(self): # initialize linear and normalization layers self.apply(init_weights) # TODO: move to DDPM and get mask from there (masking is indepdent on forward)? def random_masking(self, x): # mask: 0 keep, 1 drop cell_mask_ratio = self.cell_mask_ratio feat_mask_ratio = self.feat_mask_ratio N, D = x.shape # batch, dim if self.mask_mode == "v1": x_masked = x.clone() # apply cell masking len_keep = int(N * (1 - cell_mask_ratio)) perm = np.random.permutation(N) idx_keep = perm[:len_keep] # generate the binary mask: 0 is keep, 1 is remove mask = torch.ones([N, D], device=x.device) mask[idx_keep] = 0 # apply feature masking on the remaining part if feat_mask_ratio > 0: if self.mask_strategy == 'random': feat_mask = mask[idx_keep] feat_mask[torch.rand(len_keep, D) <= feat_mask_ratio] = 1 mask[idx_keep] = feat_mask elif self.mask_strategy == 'none_pad': for i in idx_keep: row = x_masked[i] non_padding_idx = torch.nonzero(row - self.pad_value)[0] n_mask = int(len(non_padding_idx) * feat_mask_ratio) mask_idx = np.random.choice(non_padding_idx, n_mask, replace=False) mask[i][mask_idx] = 1 else: raise NotImplementedError(f'Unsupported mask strategy: {self.mask_strategy}') x_masked[mask.bool()] = self.mask_value elif self.mask_mode == "v2": if feat_mask_ratio != 0: warnings.warn( "v2 mask disregards feat_mask_ratio, which is currently " f"set to {feat_mask_ratio!r}.", UserWarning, stacklevel=2, ) mask_ratios = torch.rand(N, 1, device=x.device) mask_ratios[torch.rand(N) < self.cell_mask_ratio] = 1 mask = torch.rand_like(x) < mask_ratios x_masked = torch.zeros_like(x).masked_scatter(~mask, x) return x_masked, mask def forward_encoder(self, x, pe_input=None, input_gene_list=None, input_gene_idx=None): # embed input input_gene_list = default(input_gene_list, self.input_gene_list) input_gene_idx = default(input_gene_idx, self.input_gene_idx) x, gene_idx = self.embedder(x, pe_input, input_gene_list, input_gene_idx) if self.blocks is None: hist = [None] * self.depth elif self.encoder_type in ("mlpparallel", "ffnparallel"): hist = [self.post_encoder_layer(blk(x)) for blk in self.blocks] else: hist = [] for blk in self.blocks: # apply context encoder blocks x = blk(x) 
hist.append(self.post_encoder_layer(x)) return hist, gene_idx def forward_decoder(self, x, context_list, timesteps=None, pe_input=None, conditions=None, input_gene_list=None, input_gene_idx=None, aug_graph=None, return_latent=False, mask=None): # embed tokens if self.decoder_embed_type == 'linear': x = self.decoder_embed(x) else: input_gene_list = default(input_gene_list, self.input_gene_list) input_gene_idx = default(input_gene_idx, self.input_gene_idx) x, _ = self.decoder_embed(x, pe_input, input_gene_list, input_gene_idx) # apply masked conditioner x = self.mask_decoder_conditioner(x, mask) # calculate time embedding if timesteps is not None and not self.no_time_embed: timesteps = timesteps.repeat(x.shape[0]) if len(timesteps) == 1 else timesteps
""" Wild mixture of: https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/facebookresearch/mae/blob/efb2a8062c206524e35e47d04501ed4f544c0ae8 Thank you! """ RESCALE_FACTOR = np.log(1e4) class DiffusionModel(nn.Module): def __init__(self, pretrained_gene_list, input_gene_list=None, dropout=0., cell_mask_ratio=0.75, mask_context=True, encoder_type='stackffn', embed_dim=1024, depth=4, dim_head=64, num_heads=4, feat_mask_ratio=0., decoder_embed_dim=512, decoder_embed_type='linear', decoder_num_heads=4, decoder_dim_head=64, cond_dim=None, cond_tokens=1, cond_type='crossattn', cond_strategy='full_mix', cond_emb_type='linear', cond_num_dict=None, cond_mask_ratio=0.5, cond_cat_input=False, post_cond_num_dict=None, post_cond_layers=2, post_cond_norm='layernorm', post_cond_mask_ratio=0.0, norm_layer='layernorm', mlp_time_embed=False, no_time_embed=False, activation='gelu', mask_strategy='random', mask_mode='v1', mask_dec_cond=False, mask_dec_cond_ratio=False, mask_dec_cond_se=False, mask_dec_cond_semlp=False, mask_dec_cond_concat=False, mask_value=0, pad_value=0, decoder_mask=None, text_emb=None, text_emb_file=None, freeze_text_emb=True, text_proj_type='linear', text_proj_act=None, stackfnn_glu_flag=False, text_proj_hidden_dim=512, text_proj_num_layers=2, text_proj_norm=None, cond_emb_norm=None, num_perts=None, gears_flag=False, gears_hidden_size=64, gears_mode="single", gears_mlp_layers=2, gears_norm=None, num_go_gnn_layers=1): super().__init__() self.depth = depth # -------------------------------------------------------------------------- # MAE masking options self.cell_mask_ratio = cell_mask_ratio self.feat_mask_ratio = feat_mask_ratio self.mask_context = mask_context self.mask_mode = mask_mode self.mask_strategy = mask_strategy self.mask_value = mask_value self.pad_value = pad_value self.decoder_mask = decoder_mask # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # MAE encoder specifics activation = create_activation(activation) # self.in_dim = len(input_gene_list) if input_gene_list is not None else len(pretrained_gene_list) self.in_dim = len(pretrained_gene_list) if pretrained_gene_list is not None else len(input_gene_list) self.pretrained_gene_list = pretrained_gene_list self.input_gene_list = input_gene_list pretrained_gene_index = dict(zip(self.pretrained_gene_list, list(range(len(self.pretrained_gene_list))))) self.input_gene_idx = torch.tensor([ pretrained_gene_index[o] for o in self.input_gene_list if o in pretrained_gene_index ]).long() if self.input_gene_list is not None else None assert embed_dim == decoder_embed_dim # XXX: this seems to be required for MAE (see forward dec)? 
full_embed_dim = embed_dim * cond_tokens self.post_encoder_layer = Rearrange('b (n d) -> b n d', n=cond_tokens, d=embed_dim) self.embedder = Embedder(pretrained_gene_list, full_embed_dim, 'layernorm', dropout=dropout) self.encoder_type = encoder_type if encoder_type == 'attn': self.blocks = nn.ModuleList([ BasicTransformerBlock(full_embed_dim, num_heads, dim_head, self_attn=True, cross_attn=False, dropout=dropout, qkv_bias=True, final_act=activation) for _ in range(depth)]) elif encoder_type in ('mlp', 'mlpparallel'): self.blocks = nn.ModuleList([ nn.Sequential( nn.Linear(full_embed_dim, full_embed_dim), activation, create_norm(norm_layer, full_embed_dim), ) for _ in range(depth)]) elif encoder_type in ('stackffn', 'ffnparallel'): self.blocks = nn.ModuleList([ # FeedForward(full_embed_dim, mult=4, glu=False, dropout=dropout) nn.Sequential( FeedForward(full_embed_dim, mult=4, glu=False, dropout=dropout), create_norm(norm_layer, full_embed_dim), ) for _ in range(depth)]) elif encoder_type == 'none': self.blocks = None else: raise ValueError(f'Unknown encoder type {encoder_type}') # self.encoder_proj = nn.Linear(full_embed_dim, latent_dim) # self.norm = create_norm(norm_layer, full_embed_dim) # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # MAE decoder specifics self.subset_output = True self.decoder_embed_dim = decoder_embed_dim self.time_embed = nn.Sequential( nn.Linear(decoder_embed_dim, 4 * decoder_embed_dim), nn.SiLU(), nn.Linear(4 * decoder_embed_dim, decoder_embed_dim), ) if mlp_time_embed else nn.Identity() self.no_time_embed = no_time_embed self.cond_type = cond_type assert cond_strategy in ("full_mix", "pre_mix") self.cond_strategy = cond_strategy self.cond_emb_type = cond_emb_type self.cond_tokens = cond_tokens self.cond_cat_input = cond_cat_input if cond_dim is not None or cond_num_dict is not None: if cond_emb_type == 'linear': assert cond_dim is not None self.cond_embed = nn.Sequential( nn.Linear(cond_dim, decoder_embed_dim * cond_tokens), Rearrange('b (n d) -> b n d', n=cond_tokens, d=decoder_embed_dim), ) elif cond_emb_type == 'embedding': assert cond_num_dict is not None self.cond_embed = EmbeddingDict(cond_num_dict, decoder_embed_dim, depth, cond_tokens, mask_ratio=cond_mask_ratio, text_emb=text_emb, text_emb_file=text_emb_file, norm_layer=cond_emb_norm, freeze_text_emb=freeze_text_emb, text_proj_type=text_proj_type, text_proj_num_layers=text_proj_num_layers, stackfnn_glu_flag=stackfnn_glu_flag, text_proj_hidden_dim=text_proj_hidden_dim, text_proj_act=text_proj_act, text_proj_norm=text_proj_norm, # text_proj_dropout=dropout, G_go=G_go, # G_go_weight=G_go_weight, num_perts=num_perts, text_proj_dropout=dropout, gears_flag=gears_flag, num_perts=num_perts, gears_hidden_size=gears_hidden_size, gears_mode=gears_mode, gears_mlp_layers=gears_mlp_layers, gears_norm=gears_norm, num_go_gnn_layers=num_go_gnn_layers) elif cond_emb_type == 'none': self.cond_embed = None else: raise ValueError(f"Unknwon condition embedder type {cond_emb_type}") else: self.cond_embed = None self.encoder = Encoder(depth, decoder_embed_dim, decoder_num_heads, decoder_dim_head, dropout=dropout, cond_type=cond_type, cond_cat_input=cond_cat_input) # self.mask_token = nn.Parameter(torch.zeros(1, decoder_embed_dim)) self.decoder_embed_type = decoder_embed_type assert decoder_embed_type in ['linear', 'embedder', 'encoder'] if decoder_embed_type == 'linear': self.decoder_embed = nn.Linear(self.in_dim, 
decoder_embed_dim) elif decoder_embed_type == 'embedder': self.decoder_embed = Embedder(pretrained_gene_list, decoder_embed_dim, 'layernorm', dropout=dropout) elif decoder_embed_type == 'encoder': self.decoder_embed = self.embedder self.mask_decoder_conditioner = MaskedEncoderConditioner( decoder_embed_dim, mult=4, use_ratio=mask_dec_cond_ratio, use_se=mask_dec_cond_se, use_semlp=mask_dec_cond_semlp, concat=mask_dec_cond_concat, disable=not mask_dec_cond) self.decoder_norm = create_norm(norm_layer, decoder_embed_dim) self.decoder = Decoder(decoder_embed_dim, self.in_dim, dropout, post_cond_norm, post_cond_layers, post_cond_num_dict, act=activation, cond_emb_dim=decoder_embed_dim, cond_mask_ratio=post_cond_mask_ratio) # -------------------------------------------------------------------------- self.initialize_weights() def initialize_weights(self): # initialize linear and normalization layers self.apply(init_weights) # TODO: move to DDPM and get mask from there (masking is indepdent on forward)? def random_masking(self, x): # mask: 0 keep, 1 drop cell_mask_ratio = self.cell_mask_ratio feat_mask_ratio = self.feat_mask_ratio N, D = x.shape # batch, dim if self.mask_mode == "v1": x_masked = x.clone() # apply cell masking len_keep = int(N * (1 - cell_mask_ratio)) perm = np.random.permutation(N) idx_keep = perm[:len_keep] # generate the binary mask: 0 is keep, 1 is remove mask = torch.ones([N, D], device=x.device) mask[idx_keep] = 0 # apply feature masking on the remaining part if feat_mask_ratio > 0: if self.mask_strategy == 'random': feat_mask = mask[idx_keep] feat_mask[torch.rand(len_keep, D) <= feat_mask_ratio] = 1 mask[idx_keep] = feat_mask elif self.mask_strategy == 'none_pad': for i in idx_keep: row = x_masked[i] non_padding_idx = torch.nonzero(row - self.pad_value)[0] n_mask = int(len(non_padding_idx) * feat_mask_ratio) mask_idx = np.random.choice(non_padding_idx, n_mask, replace=False) mask[i][mask_idx] = 1 else: raise NotImplementedError(f'Unsupported mask strategy: {self.mask_strategy}') x_masked[mask.bool()] = self.mask_value elif self.mask_mode == "v2": if feat_mask_ratio != 0: warnings.warn( "v2 mask disregards feat_mask_ratio, which is currently " f"set to {feat_mask_ratio!r}.", UserWarning, stacklevel=2, ) mask_ratios = torch.rand(N, 1, device=x.device) mask_ratios[torch.rand(N) < self.cell_mask_ratio] = 1 mask = torch.rand_like(x) < mask_ratios x_masked = torch.zeros_like(x).masked_scatter(~mask, x) return x_masked, mask def forward_encoder(self, x, pe_input=None, input_gene_list=None, input_gene_idx=None): # embed input input_gene_list = default(input_gene_list, self.input_gene_list) input_gene_idx = default(input_gene_idx, self.input_gene_idx) x, gene_idx = self.embedder(x, pe_input, input_gene_list, input_gene_idx) if self.blocks is None: hist = [None] * self.depth elif self.encoder_type in ("mlpparallel", "ffnparallel"): hist = [self.post_encoder_layer(blk(x)) for blk in self.blocks] else: hist = [] for blk in self.blocks: # apply context encoder blocks x = blk(x) hist.append(self.post_encoder_layer(x)) return hist, gene_idx def forward_decoder(self, x, context_list, timesteps=None, pe_input=None, conditions=None, input_gene_list=None, input_gene_idx=None, aug_graph=None, return_latent=False, mask=None): # embed tokens if self.decoder_embed_type == 'linear': x = self.decoder_embed(x) else: input_gene_list = default(input_gene_list, self.input_gene_list) input_gene_idx = default(input_gene_idx, self.input_gene_idx) x, _ = self.decoder_embed(x, pe_input, input_gene_list, 
input_gene_idx) # apply masked conditioner x = self.mask_decoder_conditioner(x, mask) # calculate time embedding if timesteps is not None and not self.no_time_embed: timesteps = timesteps.repeat(x.shape[0]) if len(timesteps) == 1 else timesteps
next_line: time_embed = self.time_embed(timestep_embedding(timesteps, self.decoder_embed_dim))
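The next_line above feeds timestep_embedding(timesteps, decoder_embed_dim) into the MLP time embedder. The context only shows the wrapper (timestep_embedding delegates to sinusoidal_embedding), so the following is a hedged sketch of the usual DDPM-style sinusoidal construction it most likely wraps; the function name and the cos/sin layout are assumptions, not copied from scDiff:

import math
import torch

def sinusoidal_timestep_embedding(timesteps: torch.Tensor, dim: int, max_period: int = 10000) -> torch.Tensor:
    # timesteps: 1-D tensor of N (possibly fractional) indices -> (N, dim) embedding
    half = dim // 2
    freqs = torch.exp(-math.log(max_period) * torch.arange(half, dtype=torch.float32) / half)
    args = timesteps.float()[:, None] * freqs[None, :]
    emb = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
    if dim % 2 == 1:
        emb = torch.cat([emb, torch.zeros_like(emb[:, :1])], dim=-1)  # pad odd dims
    return emb

emb = sinusoidal_timestep_embedding(torch.arange(8), dim=512)  # shape (8, 512)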
gold_snippet_index: 12
created_at: 2023-10-13 14:20:34+00:00
level: 16k
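The LitEma snippet in the scDiff context above keeps a shadow copy of every trainable parameter and, on each call, moves it toward the live weights using an effective decay of min(decay, (1 + n) / (10 + n)). A minimal framework-free sketch of that exponential-moving-average update; the class and variable names here are illustrative, not scDiff's:

import torch

class SimpleEma:
    def __init__(self, params, decay: float = 0.9999):
        self.decay = decay
        self.num_updates = 0
        self.shadow = [p.detach().clone() for p in params]

    @torch.no_grad()
    def update(self, params):
        self.num_updates += 1
        # warm-up: effective decay follows min(decay, (1 + n) / (10 + n)), as in LitEma.forward
        decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))
        for s, p in zip(self.shadow, params):
            s.sub_((1.0 - decay) * (s - p))  # s <- s - (1 - decay) * (s - p)

# usage sketch
w = [torch.nn.Parameter(torch.randn(4))]
ema = SimpleEma(w)
w[0].data += 0.1
ema.update(w)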
repo_name: weavel-ai/promptmodel-python
file_path: promptmodel/chat_model.py
[ { "identifier": "DevClient", "path": "promptmodel/dev_app.py", "snippet": "class DevClient:\n \"\"\"DevClient main class\"\"\"\n\n def __init__(self):\n self.function_models: List[FunctionModelInterface] = []\n self.chat_models: List[ChatModelInterface] = []\n\n def register(self, func):\n instructions = list(dis.get_instructions(func))\n for idx in range(\n len(instructions) - 1\n ): # We check up to len-1 because we access idx+1 inside loop\n instruction = instructions[idx]\n # print(instruction)\n if instruction.opname in [\"LOAD_ATTR\", \"LOAD_METHOD\", \"LOAD_GLOBAL\"] and (\n instruction.argval == \"FunctionModel\"\n or instruction.argval == \"ChatModel\"\n ):\n next_instruction = instructions[idx + 1]\n\n # Check if the next instruction is LOAD_CONST with string value\n if next_instruction.opname == \"LOAD_CONST\" and isinstance(\n next_instruction.argval, str\n ):\n if instruction.argval == \"FunctionModel\":\n self.function_models.append(\n FunctionModelInterface(name=next_instruction.argval)\n )\n elif instruction.argval == \"ChatModel\":\n self.chat_models.append(\n ChatModelInterface(name=next_instruction.argval)\n )\n\n def wrapper(*args, **kwargs):\n return func(*args, **kwargs)\n\n return wrapper\n\n def register_function_model(self, name):\n for function_model in self.function_models:\n if function_model.name == name:\n return\n\n self.function_models.append(FunctionModelInterface(name=name))\n\n def register_chat_model(self, name):\n for chat_model in self.chat_models:\n if chat_model.name == name:\n return\n\n self.chat_models.append(ChatModelInterface(name=name))\n\n def _get_function_model_name_list(self) -> List[str]:\n return [function_model.name for function_model in self.function_models]" }, { "identifier": "LLMProxy", "path": "promptmodel/llms/llm_proxy.py", "snippet": "class LLMProxy(LLM):\n def __init__(\n self,\n name: str,\n version: Optional[Union[str, int]] = \"deploy\",\n unit_config: Optional[UnitConfig] = None\n ):\n super().__init__()\n self._name = name\n self.version = version\n self.unit_config = unit_config\n\n def _wrap_gen(self, gen: Callable[..., Any]) -> Callable[..., Any]:\n def wrapper(inputs: Dict[str, Any], **kwargs):\n prompts, version_details = run_async_in_sync(\n LLMProxy.fetch_prompts(self._name, self.version)\n )\n call_args = self._prepare_call_args(\n prompts, version_details, inputs, kwargs\n )\n\n log_uuid = str(uuid4())\n\n # Call the generator with the arguments\n stream_response: Generator[LLMStreamResponse, None, None] = gen(**call_args)\n\n api_response = None\n dict_cache = {} # to store aggregated dictionary values\n string_cache = \"\" # to store aggregated string values\n error_occurs = False\n error_log = None\n for item in stream_response:\n if (\n item.api_response and \"delta\" not in item.api_response.choices[0]\n ): # only get the last api_response, not delta response\n api_response = item.api_response\n if item.parsed_outputs:\n dict_cache = update_dict(dict_cache, item.parsed_outputs)\n if item.raw_output:\n string_cache += item.raw_output\n if item.error and not error_occurs:\n error_occurs = True\n error_log = item.error_log\n\n if error_occurs:\n # delete all promptmodel data in item\n item.raw_output = None\n item.parsed_outputs = None\n item.function_call = None\n item.pm_detail = PMDetail(\n model=version_details[\"model\"],\n name=self._name,\n version_uuid=str(version_details[\"uuid\"]),\n version=version_details[\"version\"],\n log_uuid=log_uuid,\n )\n yield item\n\n metadata = {\n \"error\": 
error_occurs,\n \"error_log\": error_log,\n }\n\n run_async_in_sync(\n self._async_log_to_cloud(\n log_uuid=log_uuid,\n version_uuid=version_details[\"uuid\"],\n inputs=inputs,\n api_response=api_response,\n parsed_outputs=dict_cache,\n metadata=metadata,\n )\n )\n\n return wrapper\n\n def _wrap_async_gen(self, async_gen: Callable[..., Any]) -> Callable[..., Any]:\n async def wrapper(inputs: Dict[str, Any], **kwargs):\n prompts, version_details = await LLMProxy.fetch_prompts(\n self._name, self.version\n )\n call_args = self._prepare_call_args(\n prompts, version_details, inputs, kwargs\n )\n\n # Call async_gen with the arguments\n stream_response: AsyncGenerator[LLMStreamResponse, None] = async_gen(\n **call_args\n )\n\n log_uuid = str(uuid4())\n\n api_response = None\n dict_cache = {} # to store aggregated dictionary values\n string_cache = \"\" # to store aggregated string values\n error_occurs = False\n error_log = None\n api_response: Optional[ModelResponse] = None\n async for item in stream_response:\n if (\n item.api_response and \"delta\" not in item.api_response.choices[0]\n ): # only get the last api_response, not delta response\n api_response = item.api_response\n if item.parsed_outputs:\n dict_cache = update_dict(dict_cache, item.parsed_outputs)\n if item.raw_output:\n string_cache += item.raw_output\n if item.error and not error_occurs:\n error_occurs = True\n error_log = item.error_log\n item.pm_detail = PMDetail(\n model=version_details[\"model\"],\n name=self._name,\n version_uuid=str(version_details[\"uuid\"]),\n version=version_details[\"version\"],\n log_uuid=log_uuid,\n )\n yield item\n\n # # add string_cache in model_response\n # if api_response:\n # if \"message\" not in api_response.choices[0]:\n # api_response.choices[0].message = {}\n # if \"content\" not in api_response.choices[0].message:\n # api_response.choices[0].message[\"content\"] = string_cache\n # api_response.choices[0].message[\"role\"] = \"assistant\"\n\n metadata = {\n \"error\": error_occurs,\n \"error_log\": error_log,\n }\n await self._async_log_to_cloud(\n log_uuid=log_uuid,\n version_uuid=version_details[\"uuid\"],\n inputs=inputs,\n api_response=api_response,\n parsed_outputs=dict_cache,\n metadata=metadata,\n )\n\n # raise Exception(\"error_log\")\n\n return wrapper\n\n def _wrap_method(self, method: Callable[..., Any]) -> Callable[..., Any]:\n def wrapper(inputs: Dict[str, Any], **kwargs):\n prompts, version_details = run_async_in_sync(\n LLMProxy.fetch_prompts(self._name, self.version)\n )\n call_args = self._prepare_call_args(\n prompts, version_details, inputs, kwargs\n )\n\n # Call the method with the arguments\n llm_response: LLMResponse = method(**call_args)\n error_occurs = llm_response.error\n error_log = llm_response.error_log\n metadata = {\n \"error\": error_occurs,\n \"error_log\": error_log,\n }\n log_uuid = str(uuid4())\n if llm_response.parsed_outputs:\n run_async_in_sync(\n self._async_log_to_cloud(\n log_uuid=log_uuid,\n version_uuid=version_details[\"uuid\"],\n inputs=inputs,\n api_response=llm_response.api_response,\n parsed_outputs=llm_response.parsed_outputs,\n metadata=metadata,\n )\n )\n else:\n run_async_in_sync(\n self._async_log_to_cloud(\n log_uuid=log_uuid,\n version_uuid=version_details[\"uuid\"],\n inputs=inputs,\n api_response=llm_response.api_response,\n parsed_outputs={},\n metadata=metadata,\n )\n )\n if error_occurs:\n # delete all promptmodel data in llm_response\n llm_response.raw_output = None\n llm_response.parsed_outputs = None\n 
llm_response.function_call = None\n\n llm_response.pm_detail = PMDetail(\n model=version_details[\"model\"],\n name=self._name,\n version_uuid=str(version_details[\"uuid\"]),\n version=version_details[\"version\"],\n log_uuid=log_uuid,\n )\n return llm_response\n\n return wrapper\n\n def _wrap_async_method(self, method: Callable[..., Any]) -> Callable[..., Any]:\n async def async_wrapper(inputs: Dict[str, Any], **kwargs):\n prompts, version_details = await LLMProxy.fetch_prompts(\n self._name, self.version\n ) # messages, model, uuid = self._fetch_prompts()\n call_args = self._prepare_call_args(\n prompts, version_details, inputs, kwargs\n )\n\n # Call the method with the arguments\n llm_response: LLMResponse = await method(**call_args)\n error_occurs = llm_response.error\n error_log = llm_response.error_log\n metadata = {\n \"error\": error_occurs,\n \"error_log\": error_log,\n }\n log_uuid = str(uuid4())\n if llm_response.parsed_outputs:\n await self._async_log_to_cloud(\n log_uuid=log_uuid,\n version_uuid=version_details[\"uuid\"],\n inputs=inputs,\n api_response=llm_response.api_response,\n parsed_outputs=llm_response.parsed_outputs,\n metadata=metadata,\n )\n else:\n await self._async_log_to_cloud(\n log_uuid=log_uuid,\n version_uuid=version_details[\"uuid\"],\n inputs=inputs,\n api_response=llm_response.api_response,\n parsed_outputs={},\n metadata=metadata,\n )\n\n if error_occurs:\n # delete all promptmodel data in llm_response\n llm_response.raw_output = None\n llm_response.parsed_outputs = None\n llm_response.function_call = None\n\n llm_response.pm_detail = PMDetail(\n model=version_details[\"model\"],\n name=self._name,\n version_uuid=str(version_details[\"uuid\"]),\n version=version_details[\"version\"],\n log_uuid=log_uuid,\n )\n return llm_response\n\n return async_wrapper\n\n def _wrap_chat(self, method: Callable[..., Any]) -> Callable[..., Any]:\n def wrapper(session_uuid: str, **kwargs):\n instruction, version_details, message_logs = run_async_in_sync(\n LLMProxy.fetch_chat_model(self._name, session_uuid, self.version)\n )\n\n call_args = self._prepare_call_args_for_chat(\n message_logs, version_details, kwargs\n )\n\n # Call the method with the arguments\n llm_response: LLMResponse = method(**call_args)\n error_occurs = llm_response.error\n error_log = llm_response.error_log\n metadata = {\n \"error\": error_occurs,\n \"error_log\": error_log,\n }\n api_response = None\n if llm_response.api_response:\n api_response = llm_response.api_response\n\n log_uuid = str(uuid4())\n\n run_async_in_sync(\n self._async_chat_log_to_cloud(\n session_uuid=session_uuid,\n version_uuid=version_details[\"uuid\"],\n chat_log_request_list=[\n ChatLogRequest(\n message=llm_response.api_response.choices[\n 0\n ].message.model_dump(),\n uuid=log_uuid,\n metadata=metadata,\n api_response=api_response,\n )\n ],\n )\n )\n\n if error_occurs:\n # delete all promptmodel data in llm_response\n llm_response.raw_output = None\n llm_response.parsed_outputs = None\n llm_response.function_call = None\n\n llm_response.pm_detail = PMDetail(\n model=version_details[\"model\"],\n name=self._name,\n version_uuid=str(version_details[\"uuid\"]),\n version=version_details[\"version\"],\n log_uuid=log_uuid,\n )\n return llm_response\n\n return wrapper\n\n def _wrap_async_chat(self, method: Callable[..., Any]) -> Callable[..., Any]:\n async def async_wrapper(session_uuid: str, **kwargs):\n (\n instruction,\n version_details,\n message_logs,\n ) = await LLMProxy.fetch_chat_model(self._name, session_uuid, 
self.version)\n\n call_args = self._prepare_call_args_for_chat(\n message_logs, version_details, kwargs\n )\n\n # Call the method with the arguments\n llm_response: LLMResponse = await method(**call_args)\n error_occurs = llm_response.error\n error_log = llm_response.error_log\n metadata = {\n \"error\": error_occurs,\n \"error_log\": error_log,\n }\n api_response = None\n if llm_response.api_response:\n api_response = llm_response.api_response\n\n log_uuid = str(uuid4())\n await self._async_chat_log_to_cloud(\n session_uuid=session_uuid,\n version_uuid=version_details[\"uuid\"],\n chat_log_request_list=[\n ChatLogRequest(\n uuid=log_uuid,\n message=llm_response.api_response.choices[\n 0\n ].message.model_dump(),\n metadata=metadata,\n api_response=api_response,\n )\n ],\n )\n\n if error_occurs:\n # delete all promptmodel data in llm_response\n llm_response.raw_output = None\n llm_response.parsed_outputs = None\n llm_response.function_call = None\n\n llm_response.pm_detail = PMDetail(\n model=version_details[\"model\"],\n name=self._name,\n version_uuid=str(version_details[\"uuid\"]),\n version=version_details[\"version\"],\n log_uuid=log_uuid,\n )\n return llm_response\n\n return async_wrapper\n\n def _wrap_chat_gen(self, gen: Callable[..., Any]) -> Callable[..., Any]:\n def wrapper(session_uuid: str, **kwargs):\n instruction, version_details, message_logs = run_async_in_sync(\n LLMProxy.fetch_chat_model(self._name, session_uuid, self.version)\n )\n\n call_args = self._prepare_call_args_for_chat(\n message_logs, version_details, kwargs\n )\n # Call the generator with the arguments\n stream_response: Generator[LLMStreamResponse, None, None] = gen(**call_args)\n\n api_response = None\n error_occurs = False\n error_log = None\n log_uuid = str(uuid4())\n for item in stream_response:\n if (\n item.api_response and \"delta\" not in item.api_response.choices[0]\n ): # only get the last api_response, not delta response\n api_response = item.api_response\n\n if item.error and not error_occurs:\n error_occurs = True\n error_log = item.error_log\n\n if error_occurs:\n # delete all promptmodel data in item\n item.raw_output = None\n item.parsed_outputs = None\n item.function_call = None\n item.pm_detail = PMDetail(\n model=version_details[\"model\"],\n name=self._name,\n version_uuid=str(version_details[\"uuid\"]),\n version=version_details[\"version\"],\n log_uuid=log_uuid,\n )\n yield item\n\n metadata = {\n \"error\": error_occurs,\n \"error_log\": error_log,\n }\n run_async_in_sync(\n self._async_chat_log_to_cloud(\n session_uuid=session_uuid,\n version_uuid=version_details[\"uuid\"],\n chat_log_request_list=[\n ChatLogRequest(\n uuid=log_uuid,\n message=api_response.choices[0].message.model_dump(),\n metadata=metadata,\n api_response=api_response,\n )\n ],\n )\n )\n\n return wrapper\n\n def _wrap_async_chat_gen(self, async_gen: Callable[..., Any]) -> Callable[..., Any]:\n async def wrapper(session_uuid: str, **kwargs):\n (\n instruction,\n version_details,\n message_logs,\n ) = await LLMProxy.fetch_chat_model(self._name, session_uuid, self.version)\n\n call_args = self._prepare_call_args_for_chat(\n message_logs, version_details, kwargs\n )\n # Call the generator with the arguments\n stream_response: AsyncGenerator[LLMStreamResponse, None] = async_gen(\n **call_args\n )\n\n api_response = None\n error_occurs = False\n error_log = None\n log_uuid = str(uuid4())\n async for item in stream_response:\n if (\n item.api_response and \"delta\" not in item.api_response.choices[0]\n ): # only get the 
last api_response, not delta response\n api_response = item.api_response\n\n if item.error and not error_occurs:\n error_occurs = True\n error_log = item.error_log\n\n if error_occurs:\n # delete all promptmodel data in item\n item.raw_output = None\n item.parsed_outputs = None\n item.function_call = None\n\n item.pm_detail = PMDetail(\n model=version_details[\"model\"],\n name=self._name,\n version_uuid=str(version_details[\"uuid\"]),\n version=version_details[\"version\"],\n log_uuid=log_uuid,\n )\n yield item\n\n metadata = {\n \"error\": error_occurs,\n \"error_log\": error_log,\n }\n await self._async_chat_log_to_cloud(\n session_uuid=session_uuid,\n version_uuid=version_details[\"uuid\"],\n chat_log_request_list=[\n ChatLogRequest(\n uuid=log_uuid,\n message=api_response.choices[0].message.model_dump(),\n metadata=metadata,\n api_response=api_response,\n )\n ],\n )\n\n return wrapper\n\n def _prepare_call_args(\n self,\n prompts: List[Dict[str, str]],\n version_detail: Dict[str, Any],\n inputs: Dict[str, Any],\n kwargs,\n ):\n stringified_inputs = {key: str(value) for key, value in inputs.items()}\n messages = [\n {\n \"content\": prompt[\"content\"].format(**stringified_inputs),\n \"role\": prompt[\"role\"],\n }\n for prompt in prompts\n ]\n call_args = {\n \"messages\": messages,\n \"model\": version_detail[\"model\"] if version_detail else None,\n \"parsing_type\": version_detail[\"parsing_type\"] if version_detail else None,\n \"output_keys\": version_detail[\"output_keys\"] if version_detail else None,\n }\n if call_args[\"parsing_type\"] is None:\n del call_args[\"parsing_type\"]\n del call_args[\"output_keys\"]\n\n if \"functions\" in kwargs:\n call_args[\"functions\"] = kwargs[\"functions\"]\n\n if \"tools\" in kwargs:\n call_args[\"tools\"] = kwargs[\"tools\"]\n\n if \"api_key\" in kwargs:\n call_args[\"api_key\"] = kwargs[\"api_key\"]\n return call_args\n\n def _prepare_call_args_for_chat(\n self,\n messages: List[Dict[str, Any]],\n version_detail: Dict[str, Any],\n kwargs,\n ):\n call_args = {}\n token_per_tools = 0\n if \"functions\" in kwargs:\n call_args[\"functions\"] = kwargs[\"functions\"]\n token_per_tools = num_tokens_from_functions_input(\n functions=kwargs[\"functions\"],\n model=version_detail[\"model\"] if version_detail else \"gpt-3.5-turbo\",\n )\n\n if \"tools\" in kwargs:\n call_args[\"tools\"] = kwargs[\"tools\"]\n token_per_tools = num_tokens_from_functions_input(\n functions=kwargs[\"tools\"],\n model=version_detail[\"model\"] if version_detail else \"gpt-3.5-turbo\",\n )\n\n # truncate messages to make length <= model's max length\n model_max_tokens = get_max_tokens(\n model=version_detail[\"model\"] if version_detail else \"gpt-3.5-turbo\"\n )\n token_per_messages = num_tokens_for_messages_for_each(\n messages, version_detail[\"model\"]\n )\n token_limit_exceeded = (\n sum(token_per_messages) + token_per_tools\n ) - model_max_tokens\n if token_limit_exceeded > 0:\n while token_limit_exceeded > 0:\n # erase the second oldest message (first one is system prompt, so it should not be erased)\n if len(messages) == 1:\n # if there is only one message, Error cannot be solved. 
Just call LLM and get error response\n break\n token_limit_exceeded -= token_per_messages[1]\n del messages[1]\n del token_per_messages[1]\n\n call_args[\"messages\"] = messages\n call_args[\"model\"] = version_detail[\"model\"] if version_detail else None\n\n if \"api_key\" in kwargs:\n call_args[\"api_key\"] = kwargs[\"api_key\"]\n\n if \"tools\" in kwargs:\n call_args[\"tools\"] = kwargs[\"tools\"]\n\n return call_args\n\n async def _async_log_to_cloud(\n self,\n version_uuid: str,\n log_uuid: str,\n inputs: Optional[Dict] = None,\n api_response: Optional[ModelResponse] = None,\n parsed_outputs: Optional[Dict] = None,\n metadata: Optional[Dict] = None,\n ):\n config = read_config()\n if (\n \"project\" in config\n and \"mask_inputs\" in config[\"project\"]\n and config[\"project\"][\"mask_inputs\"] == True\n ):\n inputs = {key: \"PRIVATE LOGGING\" for key, value in inputs.items()}\n\n # Perform the logging asynchronously\n if api_response:\n api_response_dict = api_response.model_dump()\n api_response_dict[\"response_ms\"] = api_response._response_ms\n api_response_dict[\"_response_ms\"] = api_response._response_ms\n else:\n api_response_dict = None\n run_log_request_body = {\n \"uuid\": log_uuid,\n \"api_response\": api_response_dict,\n \"inputs\": inputs,\n \"parsed_outputs\": parsed_outputs,\n \"metadata\": metadata,\n }\n res = await AsyncAPIClient.execute(\n method=\"POST\",\n path=\"/run_log\",\n params={\n \"version_uuid\": version_uuid,\n },\n json=run_log_request_body,\n use_cli_key=False,\n )\n if res.status_code != 200:\n print(f\"[red]Failed to log to cloud: {res.json()}[/red]\");\n \n if self.unit_config:\n res_connect = await AsyncAPIClient.execute(\n method=\"POST\",\n path=\"/unit/connect\",\n json={\n \"unit_log_uuid\": self.unit_config.log_uuid,\n \"run_log_uuid\": log_uuid, \n },\n use_cli_key=False,\n )\n if res_connect.status_code != 200:\n print(f\"[red]Failed to connect prompt component to run log: {res_connect.json()}[/red]\")\n\n return res\n\n async def _async_chat_log_to_cloud(\n self,\n session_uuid: str,\n version_uuid: Optional[str] = None,\n chat_log_request_list: List[ChatLogRequest] = [],\n ):\n # Perform the logging asynchronously\n\n res = await AsyncAPIClient.execute(\n method=\"POST\",\n path=\"/chat_log\",\n params={\n \"session_uuid\": session_uuid,\n \"version_uuid\": version_uuid,\n },\n json=[r.model_dump() for r in chat_log_request_list],\n use_cli_key=False,\n )\n if res.status_code != 200:\n print(f\"[red]Failed to log to cloud: {res.json()}[/red]\")\n return res\n\n async def _async_make_session_cloud(\n self,\n session_uuid: str,\n version_uuid: Optional[str] = None,\n ):\n # Perform the logging asynchronously\n res = await AsyncAPIClient.execute(\n method=\"POST\",\n path=\"/make_session\",\n params={\n \"session_uuid\": session_uuid,\n \"version_uuid\": version_uuid,\n },\n use_cli_key=False,\n )\n if res.status_code != 200:\n print(f\"[red]Failed to make ChatSession in cloud: {res.json()}[/red]\")\n return res\n\n def make_kwargs(self, **kwargs):\n res = {}\n for key, value in kwargs.items():\n if value is not None:\n res[key] = value\n return res\n\n def run(\n self,\n inputs: Dict[str, Any] = {},\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = None,\n ) -> LLMResponse:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_method(super().run)(inputs, **kwargs)\n\n def arun(\n self,\n inputs: Dict[str, Any] = {},\n functions: 
Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = None,\n ) -> LLMResponse:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_async_method(super().arun)(inputs, **kwargs)\n\n def stream(\n self,\n inputs: Dict[str, Any] = {},\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = None,\n ) -> Generator[LLMStreamResponse, None, None]:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_gen(super().stream)(inputs, **kwargs)\n\n def astream(\n self,\n inputs: Optional[Dict[str, Any]] = {},\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = None,\n ) -> AsyncGenerator[LLMStreamResponse, None]:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_async_gen(super().astream)(inputs, **kwargs)\n\n def run_and_parse(\n self,\n inputs: Dict[str, Any] = {},\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = None,\n ) -> LLMResponse:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_method(super().run_and_parse)(inputs, **kwargs)\n\n def arun_and_parse(\n self,\n inputs: Dict[str, Any] = {},\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = None,\n ) -> LLMResponse:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_async_method(super().arun_and_parse)(inputs, **kwargs)\n\n def stream_and_parse(\n self,\n inputs: Dict[str, Any] = {},\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = None,\n ) -> Generator[LLMStreamResponse, None, None]:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_gen(super().stream_and_parse)(inputs, **kwargs)\n\n def astream_and_parse(\n self,\n inputs: Dict[str, Any] = {},\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = None,\n ) -> AsyncGenerator[LLMStreamResponse, None]:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_async_gen(super().astream_and_parse)(inputs, **kwargs)\n\n def chat_run(\n self,\n session_uuid: str,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = None,\n ) -> LLMResponse:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_chat(super().run)(session_uuid, **kwargs)\n\n def chat_arun(\n self,\n session_uuid: str,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = None,\n ) -> LLMResponse:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_async_chat(super().arun)(session_uuid, **kwargs)\n\n def chat_stream(\n self,\n session_uuid: str,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = None,\n ) -> LLMResponse:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_chat_gen(super().stream)(session_uuid, **kwargs)\n\n def chat_astream(\n self,\n session_uuid: str,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = 
None,\n ) -> LLMResponse:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_async_chat_gen(super().astream)(session_uuid, **kwargs)\n\n @staticmethod\n async def fetch_prompts(\n name,\n version: Optional[Union[str, int]] = \"deploy\",\n ) -> Tuple[List[Dict[str, str]], Dict[str, Any]]:\n \"\"\"fetch prompts.\n\n Args:\n name (str): name of FunctionModel\n\n Returns:\n Tuple[List[Dict[str, str]], Optional[Dict[str, Any]]]: (prompts, version_detail)\n \"\"\"\n # Check connection activate\n config = read_config()\n if (\n \"connection\" in config\n and \"initializing\" in config[\"connection\"]\n and config[\"connection\"][\"initializing\"] == True\n ):\n return [], {}\n elif (\n \"connection\" in config\n and \"reloading\" in config[\"connection\"]\n and config[\"connection\"][\"reloading\"] == True\n ):\n return [], {}\n else:\n if (\n \"project\" in config\n and \"use_cache\" in config[\"project\"]\n and config[\"project\"][\"use_cache\"] == True\n and version == \"deploy\"\n ):\n cache_manager = CacheManager()\n # call update_local API in background task\n cache_update_thread = Thread(\n target=cache_manager.cache_update_background_task, args=(config,)\n )\n cache_update_thread.daemon = True\n cache_update_thread.start()\n\n # get prompt from local DB by ratio\n prompt_rows, version_detail = get_deployed_prompts(name)\n if prompt_rows is None:\n return [], {}\n\n return [\n {\"role\": prompt.role, \"content\": prompt.content}\n for prompt in prompt_rows\n ], version_detail\n\n else:\n try:\n config_list = await AsyncAPIClient.execute(\n method=\"GET\",\n path=\"/function_model_versions\",\n params={\"function_model_name\": name, \"version\": version},\n use_cli_key=False,\n )\n config_list = config_list.json()\n except Exception as e:\n raise e\n\n function_model_versions = [\n x[\"function_model_version\"] for x in config_list\n ]\n\n if version == \"deploy\":\n for version in function_model_versions:\n if version[\"is_published\"] is True:\n version[\"ratio\"] = 1.0\n selected_version = select_version_by_ratio(function_model_versions)\n else:\n selected_version = function_model_versions[0]\n\n # config.prompts where config.function_model_version.uuid = selected_version.uuid\n prompt_rows = [\n config[\"prompts\"]\n for config in config_list\n if config[\"function_model_version\"][\"uuid\"]\n == selected_version[\"uuid\"]\n ][0]\n\n # sort prompt_rows by step\n prompt_rows = sorted(prompt_rows, key=lambda prompt: prompt[\"step\"])\n\n version_detail = {\n \"model\": selected_version[\"model\"],\n \"version\": selected_version[\"version\"],\n \"uuid\": selected_version[\"uuid\"],\n \"parsing_type\": selected_version[\"parsing_type\"],\n \"output_keys\": selected_version[\"output_keys\"],\n }\n\n if prompt_rows is None:\n return [], {}\n\n return [\n {\"role\": prompt[\"role\"], \"content\": prompt[\"content\"]}\n for prompt in prompt_rows\n ], version_detail\n\n @staticmethod\n async def fetch_chat_model(\n name: str,\n session_uuid: Optional[str] = None,\n version: Optional[Union[str, int]] = \"deploy\",\n ) -> Tuple[str, Dict[str, Any], List[Dict]]:\n \"\"\"fetch instruction and version detail\n\n Args:\n name (str): name of ChatModel\n\n Returns:\n Tuple[List[Dict[str, str]], Optional[Dict[str, Any]]]: (prompts, version_detail)\n \"\"\"\n # Check connection activate\n config = read_config()\n if (\n \"connection\" in config\n and \"initializing\" in config[\"connection\"]\n and config[\"connection\"][\"initializing\"] == True\n ):\n 
return \"\", {}, []\n elif (\n \"connection\" in config\n and \"reloading\" in config[\"connection\"]\n and config[\"connection\"][\"reloading\"] == True\n ):\n return \"\", {}, []\n else:\n try:\n res_data = await AsyncAPIClient.execute(\n method=\"GET\",\n path=\"/chat_model_versions_with_logs\",\n params={\n \"chat_model_name\": name,\n \"session_uuid\": session_uuid,\n \"version\": version,\n },\n use_cli_key=False,\n )\n res_data = res_data.json()\n except Exception as e:\n raise e\n chat_model_versions = res_data[\"chat_model_versions\"]\n\n if (\n session_uuid is None\n ): # if this is the initial call for deployed chat model\n if version == \"deploy\":\n for version in chat_model_versions:\n if version[\"is_published\"] is True:\n version[\"ratio\"] = 1.0\n selected_version = select_version_by_ratio(chat_model_versions)\n else:\n selected_version = chat_model_versions[0]\n else:\n selected_version = chat_model_versions[0]\n\n instruction: str = selected_version[\"system_prompt\"]\n\n version_detail = {\n \"model\": selected_version[\"model\"],\n \"uuid\": selected_version[\"uuid\"],\n \"version\": selected_version[\"version\"],\n }\n if session_uuid:\n chat_logs: List[Dict] = res_data[\"chat_logs\"]\n chat_logs = [{\"role\": \"system\", \"content\": instruction}] + chat_logs\n else:\n chat_logs = []\n\n # delete columns which value is None in each chat log\n for chat_log in chat_logs:\n for key in list(chat_log.keys()):\n if chat_log[key] is None:\n del chat_log[key]\n\n return instruction, version_detail, chat_logs\n\n # @staticmethod\n # async def fetch_chat_log(\n # session_uuid: str,\n # version: Optional[Union[str, int]] = \"deploy\",\n # ) -> List[Dict[str, Any]]:\n # \"\"\"fetch conversation log for session_uuid and version detail\n\n # Args:\n # session_uuid (str): session_uuid\n\n # Returns:\n # List[Dict[str, Any]] : list of conversation log\n # \"\"\"\n # config = read_config()\n # if \"connection\" in config and config[\"connection\"][\"initializing\"] == True:\n # return []\n # elif \"connection\" in config and config[\"connection\"][\"reloading\"] == True:\n # return []\n # else:\n # try:\n # res_data = await AsyncAPIClient.execute(\n # method=\"GET\",\n # path=\"/fetch_chat_logs\",\n # params={\"session_uuid\": session_uuid},\n # use_cli_key=False,\n # )\n # res_data = res_data.json()\n # except Exception as e:\n # raise e\n\n # # filter out unnecessary data\n # res_data = [\n # {\n # \"role\": message[\"role\"],\n # \"content\": message[\"content\"],\n # \"function_call\": message[\"function_call\"],\n # }\n # for message in res_data[\"chat_logs\"]\n # ]\n # return res_data" }, { "identifier": "logger", "path": "promptmodel/utils/logger.py", "snippet": "def debug(msg: Any, *args):\ndef success(msg: Any, *args):\ndef info(msg: Any, *args):\ndef warning(msg: Any, *args):\ndef error(msg: Any, *args):" }, { "identifier": "read_config", "path": "promptmodel/utils/config_utils.py", "snippet": "def read_config():\n \"\"\"\n Reads the configuration from the given filename.\n\n :return: A dictionary containing the configuration.\n \"\"\"\n if not os.path.exists(CONFIG_FILE):\n return {}\n\n with open(CONFIG_FILE, \"r\") as file:\n config = yaml.safe_load(file) or {}\n return config" }, { "identifier": "upsert_config", "path": "promptmodel/utils/config_utils.py", "snippet": "def upsert_config(new_config: Dict[str, Any], section: str = None):\n \"\"\"\n Upserts the given configuration file with the given configuration.\n\n :param new_config: A dictionary containing the new 
configuration.\n :param section: The section of the configuration to update.\n \"\"\"\n config = read_config()\n if section:\n config_section = config.get(section, {})\n new_config = {section: merge_dict(config_section, new_config)}\n config = merge_dict(config, new_config)\n # If . directory does not exist, create it\n if not os.path.exists(\"./.promptmodel\"):\n os.mkdir(\"./.promptmodel\")\n\n with open(CONFIG_FILE, \"w\") as file:\n yaml.safe_dump(config, file, default_flow_style=False)" }, { "identifier": "check_connection_status_decorator", "path": "promptmodel/utils/config_utils.py", "snippet": "def check_connection_status_decorator(method):\n if asyncio.iscoroutinefunction(method):\n\n @wraps(method)\n async def async_wrapper(self, *args, **kwargs):\n config = read_config()\n if \"connection\" in config and (\n (\n \"initializing\" in config[\"connection\"]\n and config[\"connection\"][\"initializing\"]\n )\n or (\n \"reloading\" in config[\"connection\"]\n and config[\"connection\"][\"reloading\"]\n )\n ):\n return\n else:\n if \"config\" not in kwargs:\n kwargs[\"config\"] = config\n return await method(self, *args, **kwargs)\n\n # async_wrapper.__name__ = method.__name__\n # async_wrapper.__doc__ = method.__doc__\n return async_wrapper\n else:\n\n @wraps(method)\n def wrapper(self, *args, **kwargs):\n config = read_config()\n if \"connection\" in config and (\n (\n \"initializing\" in config[\"connection\"]\n and config[\"connection\"][\"initializing\"]\n )\n or (\n \"reloading\" in config[\"connection\"]\n and config[\"connection\"][\"reloading\"]\n )\n ):\n return\n else:\n return method(self, *args, **kwargs)\n\n # wrapper.__name__ = method.__name__\n # wrapper.__doc__ = method.__doc__\n return wrapper" }, { "identifier": "run_async_in_sync", "path": "promptmodel/utils/async_utils.py", "snippet": "def run_async_in_sync(coro: Coroutine):\n try:\n loop = asyncio.get_running_loop()\n except RuntimeError: # No running loop\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n result = loop.run_until_complete(coro)\n # loop.close()\n return result\n\n return loop.run_until_complete(coro)" }, { "identifier": "LLMStreamResponse", "path": "promptmodel/types/response.py", "snippet": "class LLMStreamResponse(OpenAIObject):\n api_response: Optional[ModelResponse] = None\n raw_output: Optional[str] = None\n parsed_outputs: Optional[Dict[str, Any]] = None\n error: Optional[bool] = None\n error_log: Optional[str] = None\n function_call: Optional[ChoiceDeltaFunctionCall] = None\n tool_calls: Optional[List[ChoiceDeltaToolCall]] = None\n pm_detail: Optional[PMDetail] = None" }, { "identifier": "LLMResponse", "path": "promptmodel/types/response.py", "snippet": "class LLMResponse(OpenAIObject):\n api_response: Optional[ModelResponse] = None\n raw_output: Optional[str] = None\n parsed_outputs: Optional[Dict[str, Any]] = None\n error: Optional[bool] = None\n error_log: Optional[str] = None\n function_call: Optional[FunctionCall] = None\n tool_calls: Optional[List[ChatCompletionMessageToolCall]] = None\n pm_detail: Optional[PMDetail] = None" }, { "identifier": "ChatModelConfig", "path": "promptmodel/types/response.py", "snippet": "class ChatModelConfig(BaseModel):\n system_prompt: str\n model: str\n name: str\n version_uuid: str\n version: int\n message_logs: Optional[List[Dict]] = []" }, { "identifier": "InstanceType", "path": "promptmodel/types/enums.py", "snippet": "class InstanceType(str, Enum):\n ChatLog = \"ChatLog\"\n RunLog = \"RunLog\"\n ChatLogSession = \"ChatLogSession\"" 
}, { "identifier": "ChatLogRequest", "path": "promptmodel/types/request.py", "snippet": "class ChatLogRequest(BaseModel):\n uuid: Optional[str] = None\n message: Dict[str, Any]\n metadata: Optional[Dict] = None\n api_response: Optional[ModelResponse] = None\n\n def __post_init__(\n self,\n ):\n if self.api_response is not None and self.message is None:\n self.message = self.api_response.choices[0].message.model_dump()" }, { "identifier": "AsyncAPIClient", "path": "promptmodel/apis/base.py", "snippet": "class AsyncAPIClient:\n \"\"\"\n A class to represent an Async API request client.\n Used in Deployment stage.\n\n ...\n\n Methods\n -------\n get_headers():\n Generates headers for the API request.\n execute(method=\"GET\", params=None, data=None, json=None, **kwargs):\n Executes the API request.\n \"\"\"\n\n @classmethod\n async def _get_headers(cls, use_cli_key: bool = True) -> Dict:\n \"\"\"\n Reads, decrypts the api_key, and returns headers for API request.\n\n Returns\n -------\n dict\n a dictionary containing the Authorization header\n \"\"\"\n config = read_config()\n if use_cli_key:\n if \"connection\" not in config:\n print(\n \"User not logged in. Please run [violet]prompt login[/violet] first.\"\n )\n exit()\n\n encrypted_key = config[\"connection\"][\"encrypted_api_key\"]\n if encrypted_key is None:\n raise Exception(\"No API key found. Please run 'prompt login' first.\")\n decrypted_key = decrypt_message(encrypted_key)\n else:\n decrypted_key = os.environ.get(\"PROMPTMODEL_API_KEY\")\n if decrypted_key is None:\n raise Exception(\n \"PROMPTMODEL_API_KEY was not found in the current environment.\"\n )\n headers = {\"Authorization\": f\"Bearer {decrypted_key}\"}\n return headers\n\n @classmethod\n async def execute(\n cls,\n path: str,\n method=\"GET\",\n params: Dict = None,\n data: Dict = None,\n json: Dict = None,\n ignore_auth_error: bool = False,\n use_cli_key: bool = True,\n **kwargs,\n ) -> requests.Response:\n \"\"\"\n Executes the API request with the decrypted API key in the headers.\n\n Parameters\n ----------\n method : str, optional\n The HTTP method of the request (default is \"GET\")\n params : dict, optional\n The URL parameters to be sent with the request\n data : dict, optional\n The request body to be sent with the request\n json : dict, optional\n The JSON-encoded request body to be sent with the request\n ignore_auth_error: bool, optional\n Whether to ignore authentication errors (default is False)\n **kwargs : dict\n Additional arguments to pass to the requests.request function\n\n Returns\n -------\n requests.Response\n The response object returned by the requests library\n \"\"\"\n url = f\"{ENDPOINT_URL}{path}\"\n headers = await cls._get_headers(use_cli_key)\n try:\n async with httpx.AsyncClient(http2=True) as _client:\n response = await _client.request(\n method,\n url,\n headers=headers,\n params=params,\n data=data,\n json=json,\n **kwargs,\n )\n if not response:\n print(f\"[red]Error: {response}[/red]\")\n if response.status_code == 200:\n return response\n elif response.status_code == 403:\n if not ignore_auth_error:\n print(\"[red]Authentication failed.[/red]\")\n else:\n print(f\"[red]Error: {response}[/red]\")\n\n return response\n except requests.exceptions.ConnectionError:\n print(\"[red]Could not connect to the Promptmodel API.[/red]\")\n except requests.exceptions.Timeout:\n print(\"[red]The request timed out.[/red]\")\n except Exception as exception:\n print(f\"[red]Error: {exception}[/red]\")" } ]
from dataclasses import dataclass from typing import Any, Dict, List, Optional, Coroutine, Union from uuid import uuid4 from litellm import ModelResponse from promptmodel import DevClient from promptmodel.llms.llm_proxy import LLMProxy from promptmodel.utils import logger from promptmodel.utils.config_utils import ( read_config, upsert_config, check_connection_status_decorator, ) from promptmodel.utils.async_utils import run_async_in_sync from promptmodel.types.response import LLMStreamResponse, LLMResponse, ChatModelConfig from promptmodel.types.enums import InstanceType from promptmodel.types.request import ChatLogRequest from promptmodel.apis.base import AsyncAPIClient import sys
12,376
# Find an instance of Client among global variables for var_name, var_val in global_vars.items(): if isinstance(var_val, DevClient): return var_val return None class ChatModel(metaclass=RegisteringMeta): """ Args: name (_type_): _description_ version (Optional[ Union[str, int] ], optional): Choose which FunctionModel version to use. Defaults to "deploy". It can be "deploy", "latest", or version number. api_key (Optional[str], optional): API key for the LLM. Defaults to None. If None, use api_key in .env file. """ def __init__( self, name, session_uuid: str = None, version: Optional[Union[str, int]] = "deploy", api_key: Optional[str] = None, ): self.name = name self.api_key = api_key self.llm_proxy = LLMProxy(name, version) self.version = version self.recent_log_uuid = None if session_uuid is None: self.session_uuid = str(uuid4()) instruction, version_details, chat_logs = run_async_in_sync( LLMProxy.fetch_chat_model(self.name, None, version) ) config = read_config() if ( "connection" in config and "initializing" in config["connection"] and config["connection"]["initializing"] == True ): return elif ( "connection" in config and "reloading" in config["connection"] and config["connection"]["reloading"] == True ): return else: run_async_in_sync( self.llm_proxy._async_make_session_cloud( self.session_uuid, version_details["uuid"], ) ) else: self.session_uuid = session_uuid @check_connection_status_decorator def get_config( self, *args, **kwargs, ) -> ChatModelConfig: """Get config for the ChatModel. It will fetch the published prompt and version config from the Cloud. (It will be saved in cache DB, so there is no extra latency for API call.) - If you made A/B testing in Web Dashboard, it will fetch the prompt randomly by the A/B testing ratio. If dev mode is initializing, it will return None Returns: ChatModelConfig: config for the ChatModel, which contains prompts and version_detail, message_logs """ prompt, version_detail, message_logs = run_async_in_sync( LLMProxy.fetch_chat_model(self.name, self.session_uuid, self.version) ) return ChatModelConfig( system_prompt=prompt, model=version_detail["model"], name=self.name, version_uuid=str(version_detail["uuid"]), version=version_detail["version"], message_logs=message_logs, ) @check_connection_status_decorator def add_messages( self, new_messages: List[Dict[str, Any]], metadata_list: List[Optional[Dict]] = [], *args, **kwargs, ) -> None: """Add messages to the chat model. Args: new_messages (List[Dict[str, Any]]): list of messages. Each message is a dict with 'role', 'content', and 'function_call'. """ # Save messages to Cloud DB log_uuid_list = [str(uuid4()) for _ in range(len(new_messages))] run_async_in_sync( self.llm_proxy._async_chat_log_to_cloud( session_uuid=str(self.session_uuid), version_uuid=None, chat_log_request_list=[ ChatLogRequest(**{"message": message, "uuid": str(uuid4())}) for message in new_messages ], ) ) self.recent_log_uuid = log_uuid_list[-1] @check_connection_status_decorator def run( self, functions: Optional[List[Dict[str, Any]]] = None, tools: Optional[List[Dict[str, Any]]] = None, stream: Optional[bool] = False, *args, **kwargs,
from __future__ import annotations class RegisteringMeta(type): def __call__(cls, *args, **kwargs): instance: ChatModel = super().__call__(*args, **kwargs) # Find the global client instance in the current context client = cls.find_client_instance() if client is not None: client.register_chat_model(instance.name) return instance @staticmethod def find_client_instance(): # Get the current frame frame = sys._getframe(2) # Get global variables in the current frame global_vars = frame.f_globals # Find an instance of Client among global variables for var_name, var_val in global_vars.items(): if isinstance(var_val, DevClient): return var_val return None class ChatModel(metaclass=RegisteringMeta): """ Args: name (_type_): _description_ version (Optional[ Union[str, int] ], optional): Choose which FunctionModel version to use. Defaults to "deploy". It can be "deploy", "latest", or version number. api_key (Optional[str], optional): API key for the LLM. Defaults to None. If None, use api_key in .env file. """ def __init__( self, name, session_uuid: str = None, version: Optional[Union[str, int]] = "deploy", api_key: Optional[str] = None, ): self.name = name self.api_key = api_key self.llm_proxy = LLMProxy(name, version) self.version = version self.recent_log_uuid = None if session_uuid is None: self.session_uuid = str(uuid4()) instruction, version_details, chat_logs = run_async_in_sync( LLMProxy.fetch_chat_model(self.name, None, version) ) config = read_config() if ( "connection" in config and "initializing" in config["connection"] and config["connection"]["initializing"] == True ): return elif ( "connection" in config and "reloading" in config["connection"] and config["connection"]["reloading"] == True ): return else: run_async_in_sync( self.llm_proxy._async_make_session_cloud( self.session_uuid, version_details["uuid"], ) ) else: self.session_uuid = session_uuid @check_connection_status_decorator def get_config( self, *args, **kwargs, ) -> ChatModelConfig: """Get config for the ChatModel. It will fetch the published prompt and version config from the Cloud. (It will be saved in cache DB, so there is no extra latency for API call.) - If you made A/B testing in Web Dashboard, it will fetch the prompt randomly by the A/B testing ratio. If dev mode is initializing, it will return None Returns: ChatModelConfig: config for the ChatModel, which contains prompts and version_detail, message_logs """ prompt, version_detail, message_logs = run_async_in_sync( LLMProxy.fetch_chat_model(self.name, self.session_uuid, self.version) ) return ChatModelConfig( system_prompt=prompt, model=version_detail["model"], name=self.name, version_uuid=str(version_detail["uuid"]), version=version_detail["version"], message_logs=message_logs, ) @check_connection_status_decorator def add_messages( self, new_messages: List[Dict[str, Any]], metadata_list: List[Optional[Dict]] = [], *args, **kwargs, ) -> None: """Add messages to the chat model. Args: new_messages (List[Dict[str, Any]]): list of messages. Each message is a dict with 'role', 'content', and 'function_call'. 
""" # Save messages to Cloud DB log_uuid_list = [str(uuid4()) for _ in range(len(new_messages))] run_async_in_sync( self.llm_proxy._async_chat_log_to_cloud( session_uuid=str(self.session_uuid), version_uuid=None, chat_log_request_list=[ ChatLogRequest(**{"message": message, "uuid": str(uuid4())}) for message in new_messages ], ) ) self.recent_log_uuid = log_uuid_list[-1] @check_connection_status_decorator def run( self, functions: Optional[List[Dict[str, Any]]] = None, tools: Optional[List[Dict[str, Any]]] = None, stream: Optional[bool] = False, *args, **kwargs,
) -> LLMResponse:
8
2023-10-09 03:35:44+00:00
16k
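The record above is a next-line completion example: the truncated source ends after "**kwargs," and the gold continuation is ") -> LLMResponse:". As a minimal sketch of one way such an example might be scored, assuming each parsed record exposes the truncated source and its gold continuation under keys named "cropped_code" and "next_line" (the key names are an assumption made here for illustration only), a whitespace-insensitive exact-match check could look like this:

def exact_match_next_line(record: dict, predicted_line: str) -> bool:
    # Compare a model's predicted continuation against the record's gold
    # next line, ignoring leading/trailing whitespace.
    # The keys "cropped_code" and "next_line" are assumed for this sketch;
    # adapt them to however the dump is actually parsed.
    gold = record["next_line"].strip()
    return predicted_line.strip() == gold


# Illustrative values taken from the record above: the truncated code ends
# with "**kwargs," and the gold continuation is ") -> LLMResponse:".
example = {
    "cropped_code": "        **kwargs,",
    "next_line": ") -> LLMResponse:",
}
print(exact_match_next_line(example, "  ) -> LLMResponse:"))  # prints True

This is only a sketch under the stated key-name assumption; the dump itself does not prescribe an evaluation procedure.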
cambridgeltl/ClaPS
run_prune_search.py
[ { "identifier": "PromptedClassificationReward", "path": "rewards/text_classification_reward.py", "snippet": "class PromptedClassificationReward:\n def __init__(\n self,\n args,\n task_lm: str,\n is_mask_lm: Optional[bool],\n num_classes: int,\n verbalizers: List[str],\n reward_type: str = \"entropy\",\n compute_zscore: bool = True,\n incorrect_coeff: float = 180.0, # lambda_1 in paper\n correct_coeff: float = 200.0, # lambda_2 in paper\n use_bn_calibration: bool = False,\n bn_calibrator: Optional[BatchNormCalibrate] = None,\n template: Optional[str] = None,\n gpu_id: Optional[int] = None,\n ):\n \"\"\"\n Few shot text classification reward (adapted from RLPrompt repository)\n Args:\n task_lm: the string specifying the language model type of the task LM\n is_mask_lm: bool. Whether the LM is masked, or left-to-right.\n compute_zscore: bool. Whether do reward normalization by normalizing the\n mean and standard deviation across the batch.\n incorrect_coeff, correct_coeff:\n num_classes: number of classes in the labels\n verbalizers: a list of verbalizers (for e.g., for sentiment classification)\n reward_type: the type of the reward.\n \"gap\" -- use the one proposed in RLPrompt\n \"ll\" -- use the usual cross entropy loss\n template: the template to organize the queries and prompts.\n default one is [Input][Prompt][MASK].\n default template is adopted when it is not specified.\n bn_calibrator: an optional batch norm calibrator. When provided,\n in inference mode the logits will be first normalised by it first. The\n calibrator must be initialized when passed to this class.\n This class essentially provides the objective function for BO/RL/any other\n prompt optimizer.\n \"\"\"\n super().__init__()\n if torch.cuda.is_available():\n if gpu_id:\n self.device = torch.device(f\"cuda:{gpu_id}\")\n else:\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(\"cpu\")\n # self.device = torch.device(\"cpu\")\n self.args = args\n self.task_lm = task_lm\n if is_mask_lm is None:\n # If False, then treat as left-to-right LM\n self.is_mask_lm = True if \"bert\" in self.task_lm else False\n else:\n self.is_mask_lm = is_mask_lm\n assert reward_type in [\"gap\", \"cross_entropy\", \"entropy\"]\n self.reward_type = reward_type\n print(\"Task LM:\", self.task_lm)\n if self.is_mask_lm:\n assert self.task_lm in SUPPORTED_MASK_LMS\n self._tokenizer = AutoTokenizer.from_pretrained(self.task_lm)\n self._generator = AutoModelForMaskedLM.from_pretrained(self.task_lm).to(\n self.device\n )\n else:\n self._generator = T5ForConditionalGeneration.from_pretrained(\n self.task_lm\n ).to(self.device)\n self._tokenizer = AutoTokenizer.from_pretrained(\n self.task_lm, use_fast=False\n )\n\n self.compute_zscore = compute_zscore\n self.incorrect_coeff = incorrect_coeff\n self.correct_coeff = correct_coeff\n self.num_classes = num_classes\n print(\"Num classes:\", self.num_classes)\n self.verbalizers = verbalizers\n print(\"Verbalizers:\", self.verbalizers)\n self.verbalizer_ids = [\n self._tokenizer.convert_tokens_to_ids(v) for v in self.verbalizers\n ]\n print(\"Verbalizer ids:\", self.verbalizer_ids)\n if template is None:\n self.template = self.load_default_template() # prompt templates\n else:\n self.template = template\n self.use_bn_calibration = use_bn_calibration\n self.bn_calibrator = bn_calibrator\n self._counter = 0\n\n def to(self, device):\n self._generator.to(device)\n\n def load_default_template(self) -> List[str]:\n template_dict = {\n \"xnli\": [\n \" {prompt} {sentence_1} {sentence_2} 
Entailment: \", \n \" {prompt}. In this task, the goal is to predict textual entailment with 'yes' 'maybe' 'no'. sentence A implies sentence B entailment: yes; sentence A is neutral to sentence B entailment: maybe; sentence A contradicts sentence B entailment: no. Sentence A: {sentence_1}, Sentence B: {sentence_2}, Entailment: \", \n ],\n \"mnli\": [\n \" {prompt} {sentence_1} {sentence_2} Entailment: \",\n \" {prompt}. In this task, the goal is to predict textual entailment with 'yes' 'maybe' 'no'. sentence A implies sentence B entailment: yes; sentence A is neutral to sentence B entailment: maybe; sentence A contradicts sentence B entailment: no. Sentence A: {sentence_1}, Sentence B: {sentence_2}, Entailment: \", \n ],\n \"snli\": [\n \" {prompt} {sentence_1} {sentence_2} Entailment: \",\n \" {prompt}. In this task, the goal is to predict textual entailment with 'yes' 'maybe' 'no'. sentence A implies sentence B entailment: yes; sentence A is neutral to sentence B entailment: maybe; sentence A contradicts sentence B entailment: no. Sentence A: {sentence_1}, Sentence B: {sentence_2}, Entailment: \",\n ],\n \"rte\": [\n \" {prompt}. Sentence 1: {sentence_1}, Sentence 2: {sentence_2}, Textual Entailment: \",\n ],\n \"sst2\": [\n \" {prompt}. Sentence: {sentence_1}, Sentiment: \",\n ],\n \"mrpc\": [\n \" {prompt}. Sentence 1: {sentence_1}, Sentence 2: {sentence_2}, Semantically Equivalent: \",\n ],\n \"qnli\": [\n \" {prompt}. Question: {sentence_1}, Sentence: {sentence_2}, Entailment: \",\n ],\n \"qqp\": [\n \" {prompt}. Sentence 1: {sentence_1}, Sentence 2: {sentence_2}, Semantically Equivalent: \",\n ],\n \"ag_news\": [\n \" {prompt}. Classify the news articles into the categories of World, Sports, Business, and Technology. {sentence_1}: \",\n \"{prompt}\\n\\n{sentence_1}\\n\\nWhich topic is this article about?\\nWorld, Sports, Business, Technology, \",\n ],\n }\n if \"anli\" in self.args[\"dataset_name\"]:\n template = template_dict[\"anli\"][self.args[\"template_id\"]]\n elif (\n \"xnli\" in self.args[\"dataset_name\"]\n or \"americas_nli\" in self.args[\"dataset_name\"]\n ):\n template = template_dict[\"xnli\"][self.args[\"template_id\"]]\n else:\n if self.args[\"dataset_name\"] in template_dict:\n template = template_dict[self.args[\"dataset_name\"]][\n self.args[\"template_id\"]\n ]\n if self.is_mask_lm:\n mask_token = self._tokenizer.mask_token\n print(mask_token)\n simple_list = [\"SetFit/sst2\", \"SetFit/CR\", \"rotten_tomatoes\", \"SetFit/sst5\"]\n long_list = [\"yelp_polarity\", \"yelp_review_full\"]\n hard_list = [\"ag_news\"]\n rl_list = [\n \"rl-agnews\",\n \"rl-cr\",\n \"rl-mr\",\n \"rl-sst-2\",\n \"rl-sst-5\",\n \"rl-yelp-2\",\n \"rl-yelp-5\",\n ]\n if self.args[\"dataset_name\"] in simple_list:\n template = f\" {{prompt}} {{sentence_1}} It was {mask_token}.\"\n elif self.args[\"dataset_name\"] in long_list:\n template = f\" {{prompt}} It was {mask_token}. 
{{sentence_1}}\"\n elif self.args[\"dataset_name\"] in hard_list:\n template = f\" {{prompt}} {mask_token} News: {{sentence_1}}\"\n elif self.args[\"dataset_name\"] in rl_list:\n template = f\" {{prompt}} {{sentence_1}} It was {mask_token}.\"\n return template\n\n def __call__(self, *args: Any, **kwds: Any) -> Any:\n return self.forward(*args, **kwds)\n\n def forward(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n # output_token: Union[List[str], str],\n to_tensor: bool,\n mode: str = \"train\",\n verbose: bool = True,\n accumulate_class: bool = False,\n ) -> Tuple[Union[List[float], torch.Tensor], Dict[str, Any]]:\n \"\"\"\n This computes the reward of the current prompt.\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. A single prompt\n \"\"\"\n assert mode in [\"train\", \"infer\"]\n if mode == \"train\":\n self._counter += 1\n\n # Process prompts and verbalizer indices\n if isinstance(output_tokens, list):\n if isinstance(output_tokens[0], list):\n prompt_tokens = output_tokens\n prompt_strings = self._convert_tokens_to_string(prompt_tokens)\n elif isinstance(output_tokens[0], str):\n prompt_strings = output_tokens\n elif isinstance(output_tokens, str):\n prompt_strings = [output_tokens] # Single prompt string\n\n rewards: List[torch.Tensor] = []\n accs: List[float] = []\n confs: List[float] = []\n entropies: List[float] = []\n class_logits: List[torch.Tensor] = []\n\n counter_list = []\n input_rewards: Dict[str, List[float]] = defaultdict(list)\n quantities_to_log = {}\n for i, prompt in enumerate(prompt_strings):\n # Compute LM logits\n current_prompts = [prompt for _ in source_texts]\n formatted_templates = self._format_prompts(\n source_texts, source_2_texts, current_prompts\n )\n all_logits = self._get_logits(formatted_templates)\n (\n reward,\n acc,\n correct_predictions,\n conf,\n entropy,\n class_logit,\n ) = _compute_reward(\n all_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n bn_calibrator=self.bn_calibrator if self.use_bn_calibration else None,\n )\n\n rewards.append(reward)\n accs.append(acc.item())\n confs.append(conf.item())\n entropies.append(entropy.item())\n counter_list.append(correct_predictions)\n class_logits.append(class_logit)\n\n # keep track of rewards for z-score normalization\n input_rewards[\"z\"] += [reward.item()]\n\n # Print examples\n if verbose:\n print_strs = [\n \"Accuracy:\",\n acc.item(),\n \"|\",\n \"Reward:\",\n round(reward.item(), 2),\n ]\n print(*print_strs)\n rewards_tensor = torch.stack(rewards)\n accs_tensor = torch.tensor(accs)\n confs_tensor = torch.tensor(confs)\n entropies_tensor = torch.tensor(entropies)\n # compute the expected calibration error (ECE) by accs_tensor and confs_tensor\n ece = torch.abs(accs_tensor - confs_tensor).mean()\n\n # z-score normalization (2nd stage)\n if mode == \"train\" and self.compute_zscore:\n input_reward_means = {k: np.mean(v) for k, v in input_rewards.items()}\n input_reward_stds = {k: np.std(v) for k, v in input_rewards.items()}\n # not source strings\n idx_means = 
torch.tensor(input_reward_means[\"z\"]).float()\n idx_stds = torch.tensor(input_reward_stds[\"z\"]).float()\n rewards_tensor = (rewards_tensor - idx_means) / (idx_stds + 1e-4)\n quantities_to_log[prompt_strings[i]][\"resized_reward\"] = []\n for i in range(rewards_tensor.size(0)):\n quantities_to_log[prompt_strings[i]][\"resized_reward\"].append(\n rewards_tensor[i].item()\n )\n elif mode == \"infer\": # Optional: Predict Val Prompts\n score = rewards_tensor.mean().item()\n if verbose:\n print(f\"Our prompt: {prompt_strings}. Score={score}. Acc={acc}\")\n for pt in prompt_strings:\n print(self._tokenizer.tokenize(pt))\n print(accumulate_class)\n print(\"counter_list\", counter_list)\n print(\"ece\", ece)\n if accumulate_class:\n return (\n prompt_strings,\n rewards_tensor,\n accs_tensor,\n counter_list,\n ece,\n entropies_tensor,\n class_logits, # <- list of tensors. n elements = n prompts\n )\n else:\n return prompt_strings, rewards_tensor, accs_tensor\n\n if to_tensor is True:\n return rewards_tensor, accs_tensor, quantities_to_log\n else:\n return rewards_tensor.tolist(), accs, quantities_to_log\n\n def kl_divergence_row_by_row(self, p, q):\n kl_div = torch.sum(p * torch.log(p / q), dim=1)\n return kl_div\n\n def compute_default_kl(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n to_tensor: bool,\n ) -> torch.Tensor:\n \"\"\"\n This computes the probs of the naive prompt (instruction).\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. A single prompt\n \"\"\"\n default_templates = self._format_prompts(\n source_texts, source_2_texts, [\"\" for _ in source_texts]\n )\n default_logits = self._get_logits(default_templates)\n default_probs = _compute_probs(\n default_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n )\n return default_probs\n\n def compute_default_reward(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n to_tensor: bool,\n ) -> torch.Tensor:\n \"\"\"\n This computes the rewards of the naive prompt (instruction).\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. 
A single prompt\n \"\"\"\n default_templates = self._format_prompts(\n source_texts, source_2_texts, [\"\" for _ in source_texts]\n )\n default_logits = self._get_logits(default_templates)\n default_reward, _, _, _, _, _ = _compute_reward(\n default_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n )\n return default_reward\n\n def compute_kl(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n to_tensor: bool,\n default_probs: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n This computes the kl-divergence of the current prompt to the naive prompt (instruction).\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. A single prompt\n \"\"\"\n # Process prompts and verbalizer indices\n if isinstance(output_tokens, list):\n if isinstance(output_tokens[0], list):\n prompt_tokens = output_tokens\n prompt_strings = self._convert_tokens_to_string(prompt_tokens)\n elif isinstance(output_tokens[0], str):\n prompt_strings = output_tokens\n elif isinstance(output_tokens, str):\n prompt_strings = [output_tokens] # Single prompt string\n\n rewards: List[torch.Tensor] = []\n input_rewards: Dict[str, List[float]] = defaultdict(list)\n for i, prompt in enumerate(prompt_strings):\n # Compute LM logits\n current_prompts = [prompt for _ in source_texts]\n formatted_templates = self._format_prompts(\n source_texts, source_2_texts, current_prompts\n )\n all_logits = self._get_logits(formatted_templates)\n prompt_probs = _compute_probs(\n all_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n )\n kl = self.kl_divergence_row_by_row(prompt_probs, default_probs)\n kl = torch.sum(kl)\n rewards.append(kl)\n kl_tensor = torch.stack(rewards)\n return kl_tensor\n\n def compute_reward_diff(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n to_tensor: bool,\n default_rewards: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n This computes the kl-divergence of the current prompt to the naive prompt (instruction).\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. 
A single prompt\n \"\"\"\n # Process prompts and verbalizer indices\n if isinstance(output_tokens, list):\n if isinstance(output_tokens[0], list):\n prompt_tokens = output_tokens\n prompt_strings = self._convert_tokens_to_string(prompt_tokens)\n elif isinstance(output_tokens[0], str):\n prompt_strings = output_tokens\n elif isinstance(output_tokens, str):\n prompt_strings = [output_tokens] # Single prompt string\n\n rewards: List[torch.Tensor] = []\n for i, prompt in enumerate(prompt_strings):\n # Compute LM logits\n current_prompts = [prompt for _ in source_texts]\n formatted_templates = self._format_prompts(\n source_texts, source_2_texts, current_prompts\n )\n all_logits = self._get_logits(formatted_templates)\n prompt_rewards, _, _, _, _, _ = _compute_reward(\n all_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n )\n reward_diff = prompt_rewards - default_rewards\n reward_diff = torch.sum(reward_diff)\n rewards.append(reward_diff)\n reward_diff_tensor = torch.stack(rewards)\n return reward_diff_tensor\n\n # Adapted from\n # https://huggingface.co/docs/transformers/v4.21.1/en/task_summary#masked-language-modeling\n def _get_mask_token_index(self, input_ids: torch.Tensor) -> np.ndarray:\n mask_token_index = torch.where(input_ids == self._tokenizer.mask_token_id)[1]\n return mask_token_index\n\n def ensure_exactly_one_mask_token(\n self, model_inputs: Dict[str, torch.Tensor]\n ) -> None:\n for input_ids in model_inputs[\"input_ids\"]:\n masked_index = self._get_mask_token_index(input_ids)\n numel = np.prod(masked_index.shape)\n assert numel == 1\n\n @torch.no_grad()\n def _get_logits(self, texts: List[str]) -> torch.Tensor:\n # for MLM, add mask token\n batch_size = len(texts)\n encoded_inputs = self._tokenizer(\n texts,\n padding=\"longest\",\n truncation=True,\n return_tensors=\"pt\",\n add_special_tokens=True,\n )\n decoder_input_ids = (\n torch.ones((batch_size, 1)) * torch.tensor(self._tokenizer.pad_token_id)\n ).int()\n if self.is_mask_lm:\n # self.ensure_exactly_one_mask_token(encoded_inputs) TODO\n token_logits = self._generator(**encoded_inputs.to(self.device)).logits\n mask_token_indices = self._get_mask_token_index(encoded_inputs[\"input_ids\"])\n out_logits = token_logits[range(batch_size), mask_token_indices, :]\n return out_logits\n else:\n token_logits = self._generator(\n input_ids=encoded_inputs[\"input_ids\"].to(self.device),\n decoder_input_ids=decoder_input_ids.to(self.device),\n ).logits\n token_logits = token_logits[:, 0, :]\n return token_logits\n\n def _convert_tokens_to_string(self, tokens: List[List[str]]) -> List[str]:\n return [self._tokenizer.convert_tokens_to_string(s) for s in tokens]\n\n def _format_prompts(\n self,\n source_strs: List[str],\n source_2_strs: List[str],\n prompt_strs: List[str],\n ) -> List[str]:\n return [\n self.template.format(sentence_1=s_1, sentence_2=s_2, prompt=p)\n for s_1, s_2, p in zip(source_strs, source_2_strs, prompt_strs)\n ]" }, { "identifier": "PromptedClassificationDataset", "path": "utils/fsc_datasets.py", "snippet": "class PromptedClassificationDataset:\n def __init__(self, args):\n self.args = args\n self.glue_list = ['sst2', 'rte', 'mrpc', 'qqp', 'mnli', 'qnli']\n self.superglue_list = ['cb', 'copa', 'boolq', 'wic', 'wsc']\n self.nli_3_list = ['mnli', 'xnli', 'anli', 'cb', 'snli']\n if 'xnli' in args['dataset_name']:\n split = self.args['dataset_name'].split('_')[1]\n self.dataset = 
datasets.load_dataset('xnli', split)\n elif args['dataset_name'] in self.glue_list:\n self.dataset = datasets.load_dataset('glue', args['dataset_name'])\n elif 'anli' in args['dataset_name']:\n self.dataset = datasets.load_dataset('anli')\n elif args['dataset_name'] in self.superglue_list:\n self.dataset = datasets.load_dataset('super_glue', args['dataset_name'])\n elif 'rl' in args['dataset_name']:\n pass\n else:\n self.dataset = datasets.load_dataset(args['dataset_name'])\n def get_few_shot_dataset(self, shots: int) -> tuple:\n \"\"\"\n Retrieves a few-shot dataset by selecting a specified number of instances per class from the given dataset.\n \n Args:\n dataset (dict): A dictionary containing the dataset split into \"train\", \"validation\", and \"test\" subsets.\n shots (int): The number of instances to select per class for the few-shot dataset.\n \n Returns:\n tuple: The few-shot training dataset, the original validation dataset, and the original test dataset.\n \"\"\"\n \n if self.args['dataset_name'] == 'mnli':\n train_dataset = self.dataset['train']\n val_dataset = self.dataset['validation_matched']\n test_dataset = self.dataset['test_matched']\n elif self.args['dataset_name'] == 'yelp_polarity' or self.args['dataset_name'] == 'ag_news' or self.args['dataset_name'] == 'SetFit/CR' or self.args['dataset_name'] == 'yelp_review_full':\n train_dataset = self.dataset['train']\n val_dataset = self.dataset['train']\n test_dataset = self.dataset['test']\n elif 'rl' in self.args['dataset_name']:\n train_dataset = get_rl_data('train', self.args['dataset_name'], self.args['seed'])\n val_dataset = get_rl_data('dev', self.args['dataset_name'], self.args['seed'])\n test_dataset = get_rl_data('test', self.args['dataset_name'], self.args['seed'])\n train_dataset = [x for x in train_dataset]\n val_dataset = [x for x in val_dataset]\n return train_dataset, val_dataset, test_dataset\n elif self.args['dataset_name'] == 'snli':\n train_dataset = [x for x in self.dataset['train'] if x['label'] != -1]\n val_dataset = [x for x in self.dataset['validation'] if x['label'] != -1]\n test_dataset = [x for x in self.dataset['test'] if x['label'] != -1]\n else:\n train_dataset = self.dataset['train']\n val_dataset = self.dataset['validation']\n test_dataset = self.dataset['test']\n\n train_0 = [x for x in train_dataset if x['label'] == 0][:shots]\n train_1 = [x for x in train_dataset if x['label'] == 1][:shots]\n train_2 = [x for x in train_dataset if x['label'] == 2][:shots]\n train_3 = [x for x in train_dataset if x['label'] == 3][:shots]\n train_4 = [x for x in train_dataset if x['label'] == 4][:shots]\n train_dataset = train_0 + train_1 + train_2 + train_3 + train_4\n if self.args['dataset_name'] in self.glue_list or self.args['dataset_name'] in self.superglue_list:\n val_0 = [x for x in train_dataset if x['label'] == 0][-shots:]\n val_1 = [x for x in train_dataset if x['label'] == 1][-shots:]\n val_2 = [x for x in train_dataset if x['label'] == 2][-shots:]\n new_val_dataset = val_0 + val_1 + val_2\n test_dataset = val_dataset\n print('train_dataset', train_dataset)\n return train_dataset, new_val_dataset, test_dataset\n elif self.args['dataset_name'] == 'ag_news' or self.args['dataset_name'] == 'yele_review_full':\n val_0 = [x for x in train_dataset if x['label'] == 0][-shots:]\n val_1 = [x for x in train_dataset if x['label'] == 1][-shots:]\n val_2 = [x for x in train_dataset if x['label'] == 2][-shots:]\n val_3 = [x for x in train_dataset if x['label'] == 3][-shots:]\n val_4 = [x for x in train_dataset if 
x['label'] == 4][-shots:]\n new_val_dataset = val_0 + val_1 + val_2 + val_3 + val_4\n test_dataset = val_dataset\n print('train_dataset', train_dataset)\n return train_dataset, new_val_dataset, test_dataset\n \n val_0 = [x for x in val_dataset if x['label'] == 0][:shots]\n val_1 = [x for x in val_dataset if x['label'] == 1][:shots]\n val_2 = [x for x in val_dataset if x['label'] == 2][:shots]\n val_dataset = val_0 + val_1 + val_2\n print('train_dataset', train_dataset)\n return train_dataset, val_dataset, test_dataset\n\n def get_verbalizer(self) -> list:\n if 'xnli' in self.args['dataset_name'] or self.args['dataset_name'] == 'mnli' or 'anli' in self.args['dataset_name'] or 'americas_nli' in self.args['dataset_name'] or self.args['dataset_name'] == 'snli':\n verbalizer_predefined = ['yes', 'maybe', 'no']\n elif self.args['dataset_name'] == 'sst2' or self.args['dataset_name'] == 'yelp_polarity':\n verbalizer_predefined = ['negative', 'positive']\n elif self.args['dataset_name'] == 'rte' or self.args['dataset_name'] == 'qnli':\n verbalizer_predefined = ['yes', 'no']\n elif self.args['dataset_name'] == 'mrpc' or self.args['dataset_name'] == 'qqp':\n verbalizer_predefined = ['no', 'yes']\n elif self.args['dataset_name'] == 'boolq':\n verbalizer_predefined = ['no', 'yes']\n elif 'indonlp/NusaX-senti' in self.args['dataset_name']:\n verbalizer_predefined = ['negative', 'neutral', 'positive']\n elif self.args['dataset_name'] == 'ag_news':\n verbalizer_predefined = ['World', 'Sports', 'Business', 'Technology']\n\n special_space = '▁'\n binary_list = ['SetFit/sst2', 'yelp_polarity', 'SetFit/CR', 'rotten_tomatoes']\n rl_binary_list = ['rl-cr', 'rl-mr', 'rl-sst-2', \n 'rl-yelp-2']\n if 'bert' in self.args['model_name']:\n special_space = 'Ġ'\n if self.args['dataset_name'] in binary_list:\n verbalizer_predefined = ['terrible', 'great']\n elif self.args['dataset_name'] == 'ag_news':\n verbalizer_predefined = ['World', 'Sports', 'Business', 'Tech']\n elif self.args['dataset_name'] == 'SetFit/sst5' or self.args['dataset_name'] == 'yelp_review_full':\n verbalizer_predefined = ['terrible', 'bad', 'okay', 'good', 'great']\n elif self.args['dataset_name'] in rl_binary_list:\n verbalizer_predefined = ['terrible', 'great']\n\n verbalizer_predefined = [special_space + v for v in verbalizer_predefined]\n return verbalizer_predefined\n \n def get_data(self, data) -> tuple:\n text_label_list = ['yelp_polarity', 'ag_news', 'SetFit/sst5', 'SetFit/CR', 'rotten_tomatoes', \"SetFit/sst2\", 'yelp_review_full']\n rl_list = ['rl-agnews', 'rl-cr', 'rl-mr', 'rl-sst-2', \n 'rl-sst-5', 'rl-yelp-2', 'rl-yelp-5']\n if 'xnli' in self.args['dataset_name'] or self.args['dataset_name'] == 'mnli' or 'anli' in self.args['dataset_name'] or 'americas_nli' in self.args['dataset_name'] or self.args['dataset_name'] == 'snli':\n return [d[\"premise\"] for d in data], [d[\"hypothesis\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'sst2':\n return [d[\"sentence\"] for d in data], [d[\"sentence\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'rte' or self.args['dataset_name'] == 'mrpc':\n return [d[\"sentence1\"] for d in data], [d[\"sentence2\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'qnli':\n return [d[\"question\"] for d in data], [d[\"sentence\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'qqp':\n return [d[\"question1\"] for d in data], [d[\"question2\"] for d in data], [d[\"label\"] 
for d in data]\n elif self.args['dataset_name'] == 'boolq':\n return [d[\"question\"] for d in data], [d[\"passage\"] for d in data], [d[\"label\"] for d in data]\n elif 'indonlp/NusaX-senti' in self.args['dataset_name'] or self.args['dataset_name'] in text_label_list:\n return [d[\"text\"] for d in data], [d[\"text\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] in rl_list:\n return [d[\"text\"] for d in data], [d[\"text\"] for d in data], [d[\"label\"] for d in data]" }, { "identifier": "GeneticAlgorithmTrainer", "path": "algs/genetics.py", "snippet": "class GeneticAlgorithmTrainer(BaseTrainer):\n def __init__(\n self,\n pop_size: int,\n mutate_size: int,\n crossover_size: int,\n epochs: int,\n mutate_frac: float,\n str_len: int,\n stages: int,\n n_classes: int,\n eval_batch_size: int,\n genetics: Genetics,\n obj_func: PromptedClassificationReward,\n prompt_dataset: PromptedClassificationDataset,\n logger: Any,\n use_bn_calibrator: bool,\n ):\n super().__init__(\n obj_func=obj_func,\n prompt_dataset=prompt_dataset,\n logger=logger,\n use_bn_calibrator=use_bn_calibrator,\n )\n self.pop_size = pop_size\n self.mutate_size = mutate_size\n self.crossover_size = crossover_size\n self.epochs = epochs\n self.mutate_frac = mutate_frac\n self.str_len = str_len\n self.stages = stages\n self.n_classes = n_classes\n self.genetics = genetics\n self.epoch_per_extend = 3\n self.extend_size = 128\n self.eval_batch_size = eval_batch_size\n\n def train(self, train_data):\n premise_texts, hypothesis_texts, class_labels = self.prompt_dataset.get_data(\n train_data\n )\n epoch_per_stage = self.epochs // self.stages\n start_str = \"\"\n best_str_list = []\n\n for _ in range(self.stages):\n pop = [\n self.genetics.random_string(self.str_len) for _ in range(self.pop_size)\n ]\n if self.logger is not None:\n self.logger.info(pop)\n old_reward = 0\n epoch_counter = 0\n for evo_epoch in range(epoch_per_stage):\n if self.str_len == 1:\n pop_ = [start_str + \" \" + p for p in pop]\n else:\n pop_ = [start_str + p for p in pop]\n reward = self.obj_func.forward(\n premise_texts,\n hypothesis_texts,\n class_labels,\n pop_,\n True,\n \"infer\",\n verbose=False,\n )[0]\n if self.logger is not None:\n self.logger.info(\n f\"Epoch = {evo_epoch}. Max reward = {reward.max()}. Best prompt = {pop_[reward.argmax()]}\"\n )\n max_reward = reward.max()\n if max_reward > old_reward:\n old_reward = max_reward\n epoch_counter = 0\n else:\n epoch_counter += 1\n\n sorted_idx = reward.argsort(descending=True)[\n : max(1, int(reward.shape[0] * self.mutate_frac))\n ]\n pop = [pop[i] for i in sorted_idx]\n mutate_cfgs, crossover_cfgs = [], []\n extend_cfgs = []\n for _ in range(self.mutate_size):\n old_cfg = np.random.choice(pop)\n cfg = self.genetics.mutate(old_cfg)\n mutate_cfgs.append(cfg)\n\n for _ in range(self.crossover_size):\n cfg1 = np.random.choice(pop)\n cfg2 = np.random.choice(pop)\n cfg = self.genetics.crossover(cfg1, cfg2)\n crossover_cfgs.append(cfg)\n\n pop += mutate_cfgs\n pop += crossover_cfgs\n\n if self.logger is not None:\n self.logger.info(\n f\"Epoch = {evo_epoch}. 
Population length = {len(pop)}\"\n )\n\n if self.str_len > 1:\n if pop[reward.argmax()] not in best_str_list:\n best_str_list.append(pop[reward.argmax()])\n else:\n if pop_[reward.argmax()] not in best_str_list:\n best_str_list.append(pop_[reward.argmax()])\n # if we do step by steo do the pop_\n if self.str_len == 1:\n pop_ = [start_str + \" \" + p for p in pop]\n else:\n pop_ = [start_str + p for p in pop]\n start_str = pop_[reward.argmax()]\n\n return best_str_list\n\n def random_train(self, train_data):\n premise_texts, hypothesis_texts, class_labels = self.prompt_dataset.get_data(\n train_data\n )\n start_str = \"\"\n best_str_list = []\n pop = [\n self.genetics.random_string(self.str_len)\n for _ in range(self.pop_size * self.epochs)\n ]\n # logger.info(pop)\n pop_ = [start_str + p for p in pop]\n reward = self.obj_func.forward(\n premise_texts,\n hypothesis_texts,\n class_labels,\n pop_,\n True,\n \"infer\",\n verbose=False,\n )[0]\n\n if self.logger is not None:\n self.logger.info(\n f\"Max reward = {reward.max()}. Best prompt = {pop_[reward.argmax()]}\"\n )\n if pop[reward.argmax()] not in best_str_list:\n best_str_list.append(pop[reward.argmax()])\n return best_str_list" }, { "identifier": "Genetics", "path": "algs/genetics.py", "snippet": "class Genetics:\n def __init__(self, crossover_tokenizer, vocab_id):\n self.crossover_tokenizer = crossover_tokenizer\n self.vocab_id = vocab_id\n\n def mutate(self, x, prob=0.1):\n \"\"\"\n Mutates the input string by replacing tokens with a certain probability.\n\n Args:\n x (str): The input string.\n prob (float, optional): The probability of replacing each token. Defaults to 0.1.\n\n Returns:\n str: The mutated string.\n \"\"\"\n x_list = self.crossover_tokenizer.encode(x)\n\n def pick_another(x_, candidates):\n return (\n x_\n if len(candidates) == 1\n else random.choice([v for v in candidates if v != x_])\n )\n\n for i, element in enumerate(x_list):\n if i == 0 or i == len(x_list) - 1:\n continue\n if random.random() < prob:\n x_list[i] = pick_another(element, self.vocab_id)\n\n out = self.crossover_tokenizer.decode(x_list, skip_special_tokens=True)\n return out\n\n def crossover(self, x1, x2):\n \"\"\"\n Performs crossover between two input strings.\n\n Args:\n x1 (str): The first input string.\n x2 (str): The second input string.\n\n Returns:\n str: The crossover result.\n \"\"\"\n\n def _crossover_helper(v1, v2):\n return v1 if random.random() < 0.5 else v2\n\n def _inbalance_helper(v1, v2):\n n_tokens = min(len(v1), len(v2))\n max_n = max(len(v1), len(v2))\n out_token = []\n for i in range(n_tokens):\n out_token.append(v1[i] if random.random() < 0.5 else v2[i])\n for i in range(n_tokens, max_n):\n out_token.append(v1[i] if len(v1) > n_tokens else v2[i])\n return out_token\n\n x1_tokens = self.crossover_tokenizer.encode(x1)\n x2_tokens = self.crossover_tokenizer.encode(x2)\n x = _crossover_helper(x1_tokens, x2_tokens)\n ret = self.crossover_tokenizer.decode(x, skip_special_tokens=True)\n return ret\n\n def random_string(self, length=5):\n \"\"\"\n Generates a random string of a specified length.\n\n Args:\n length (int, optional): The length of the random string. 
Defaults to 5.\n\n Returns:\n str: The random string.\n \"\"\"\n choices = self.vocab_id\n out = random.choices(choices, k=length)\n out = self.crossover_tokenizer.decode(out, skip_special_tokens=True)\n return out\n\n def random_extend_pop(self, pop: list, n: int) -> list:\n \"\"\"\n Extends the population with random strings.\n\n Args:\n pop (list): The population.\n n (int): The number of random strings to generate.\n\n Returns:\n list: The extended population.\n \"\"\"\n pop = [p + self.random_string(n) for p in pop]\n return pop" }, { "identifier": "ParticleSwarmOptimizer", "path": "algs/particle_swarm.py", "snippet": "class ParticleSwarmOptimizer(BaseTrainer):\n def __init__(\n self,\n pop_size: int,\n epochs: int,\n mutate_frac: float,\n str_len: int,\n n_classes: int,\n eval_batch_size: int,\n obj_func: PromptedClassificationReward,\n prompt_dataset: PromptedClassificationDataset,\n logger: Any,\n use_bn_calibrator: bool,\n vocab_id,\n crossover_tokenizer,\n ):\n super().__init__(\n obj_func=obj_func,\n prompt_dataset=prompt_dataset,\n logger=logger,\n use_bn_calibrator=use_bn_calibrator,\n )\n self.crossover_tokenizer = crossover_tokenizer\n self.vocab_id = vocab_id\n self.pop_size = pop_size\n self.epochs = epochs\n self.mutate_frac = mutate_frac\n self.str_len = str_len\n self.n_classes = n_classes\n self.eval_batch_size = eval_batch_size\n\n def do_replace(self, x_cur, pos, new_word):\n x_new = x_cur.copy()\n x_new[pos] = new_word\n return x_new\n\n def predict_batch(\n self,\n sentences,\n ):\n return np.array(\n [\n self.predict(\n s,\n )\n for s in sentences\n ]\n )\n\n def predict(\n self,\n sentence,\n ):\n # Alia for reward computation -- note that we expect\n # a list of int in terms of vocab_id for sentence argument here.\n sentence_str = self.crossover_tokenizer.decode(\n sentence, skip_special_tokens=True\n )\n tem = (\n self.obj_func.forward(\n self.premise_texts,\n self.hypothesis_texts,\n self.class_labels,\n [sentence_str],\n True,\n \"infer\",\n verbose=False,\n )[0]\n .detach()\n .cpu()\n .item()\n )\n\n return tem\n\n def select_best_replacement(self, pos, x_cur, replace_list):\n \"\"\"Select the most effective replacement to word at pos (pos)\n in (x_cur) between the words in replace_list\"\"\"\n new_x_list = [\n self.do_replace(x_cur, pos, w) if w != 0 else x_cur for w in replace_list\n ]\n # Randomly select some rather than enumerate, which is very slow\n new_x_list_str = [\n self.crossover_tokenizer.decode(s, skip_special_tokens=True)\n for s in new_x_list\n ]\n x_scores = (\n self.obj_func.forward(\n self.premise_texts,\n self.hypothesis_texts,\n self.class_labels,\n new_x_list_str,\n True,\n \"infer\",\n verbose=False,\n )[0]\n .detach()\n .cpu()\n .numpy()\n )\n # new_x_preds = self.predict_batch(new_x_list)\n # x_scores = new_x_preds # [:, target]\n orig_score = self.predict(x_cur) # [target]\n\n new_x_scores = x_scores - orig_score\n # Eliminate not that clsoe words\n\n if np.max(new_x_scores) > 0:\n best_id = np.argsort(new_x_scores)[-1]\n return [x_scores[best_id], new_x_list[best_id]]\n return [orig_score, x_cur]\n\n def perturb(self, x_cur, neigbhours, w_select_probs):\n # Pick a word that is not modified and is not UNK\n x_len = w_select_probs.shape[0]\n rand_idx = np.random.choice(x_len, 1, p=w_select_probs)[0]\n # while x_cur[rand_idx] != x_orig[rand_idx] and np.sum(x_orig != x_cur) < np.sum(\n # np.sign(w_select_probs)\n # ):\n # rand_idx = np.random.choice(x_len, 1, p=w_select_probs)[0]\n replace_list = neigbhours[rand_idx]\n x_cur[rand_idx] = 
np.random.choice(replace_list)\n score = self.predict(x_cur)\n return [score, x_cur]\n # return self.select_best_replacement(rand_idx, x_cur, replace_list)\n\n def turn(self, x1, x2, prob, x_len):\n x_new = copy.deepcopy(x2)\n for i in range(x_len):\n if np.random.uniform() < prob[i]:\n x_new[i] = x1[i]\n return x_new\n\n def equal(self, a, b):\n return -3 if a == b else 3\n\n def sigmod(self, n):\n return 1 / (1 + np.exp(-n))\n\n def train(self, train_data):\n (\n self.premise_texts,\n self.hypothesis_texts,\n self.class_labels,\n ) = self.prompt_dataset.get_data(train_data)\n\n neigbhours_list = [self.vocab_id for _ in range(self.str_len)]\n neighbours_len = [len(x) for x in neigbhours_list]\n x_len = self.str_len\n #\n w_select_probs = []\n for pos in range(x_len):\n if neighbours_len[pos] == 0:\n w_select_probs.append(0)\n else:\n w_select_probs.append(min(neighbours_len[pos], 10))\n w_select_probs = w_select_probs / np.sum(w_select_probs)\n\n if np.sum(neighbours_len) == 0:\n return None\n\n # Generate random population\n pop = [\n np.random.choice(self.vocab_id, self.str_len) for _ in range(self.pop_size)\n ]\n pop_scores = self.predict_batch(\n pop,\n )\n\n part_elites = copy.deepcopy(pop)\n part_elites_scores = pop_scores\n all_elite_score = np.max(pop_scores)\n pop_ranks = np.argsort(pop_scores)\n top_attack = pop_ranks[-1]\n all_elite = pop[top_attack]\n\n Omega_1 = 0.8\n Omega_2 = 0.2\n C1_origin = 0.8\n C2_origin = 0.2\n V = [np.random.uniform(-3, 3) for rrr in range(self.pop_size)]\n V_P = [[V[t] for rrr in range(x_len)] for t in range(self.pop_size)]\n\n for i in range(self.epochs):\n Omega = (Omega_1 - Omega_2) * (self.epochs - i) / self.epochs + Omega_2\n C1 = C1_origin - i / self.epochs * (C1_origin - C2_origin)\n C2 = C2_origin + i / self.epochs * (C1_origin - C2_origin)\n\n for id in range(self.pop_size):\n for dim in range(x_len):\n V_P[id][dim] = Omega * V_P[id][dim] + (1 - Omega) * (\n self.equal(pop[id][dim], part_elites[id][dim])\n + self.equal(pop[id][dim], all_elite[dim])\n )\n turn_prob = [self.sigmod(V_P[id][d]) for d in range(x_len)]\n P1 = C1\n P2 = C2\n\n if np.random.uniform() < P1:\n pop[id] = self.turn(part_elites[id], pop[id], turn_prob, x_len)\n if np.random.uniform() < P2:\n pop[id] = self.turn(all_elite, pop[id], turn_prob, x_len)\n\n pop_scores = []\n pop_scores_all = []\n for a in pop:\n pt = self.predict(a)\n\n pop_scores.append(pt)\n pop_scores_all.append(pt)\n pop_ranks = np.argsort(pop_scores)\n top_attack = pop_ranks[-1]\n\n if self.logger is not None:\n self.logger.info(\n f\"{i} -- {pop_scores[top_attack]}。 Best = {self.crossover_tokenizer.decode(all_elite, add_special_tokens=False)}\"\n )\n\n new_pop = []\n new_pop_scores = []\n for id in range(len(pop)):\n x = pop[id]\n if np.random.uniform() < self.mutate_frac:\n tem = self.perturb(x, neigbhours_list, w_select_probs)\n # if tem is None:\n # return None\n # # if tem[0] == 1:\n # # return tem[1]\n # else:\n new_pop_scores.append(tem[0])\n new_pop.append(tem[1])\n else:\n new_pop_scores.append(pop_scores[id])\n new_pop.append(x)\n pop = new_pop\n\n pop_scores = new_pop_scores\n pop_ranks = np.argsort(pop_scores)\n top_attack = pop_ranks[-1]\n for k in range(self.pop_size):\n if pop_scores[k] > part_elites_scores[k]:\n part_elites[k] = pop[k]\n part_elites_scores[k] = pop_scores[k]\n elite = pop[top_attack]\n if np.max(pop_scores) > all_elite_score:\n all_elite = elite\n all_elite_score = np.max(pop_scores)\n\n all_elite_str = self.crossover_tokenizer.decode(\n all_elite, 
add_special_tokens=False\n )\n\n return [all_elite_str]" }, { "identifier": "GreedyTrainer", "path": "algs/greedy.py", "snippet": "class GreedyTrainer(BaseTrainer):\n def __init__(\n self,\n obj_func: PromptedClassificationReward,\n prompt_dataset: PromptedClassificationDataset,\n vocab_id,\n crossover_tokenizer,\n str_len: int,\n n_classes: int,\n eval_batch_size: int,\n logger,\n use_bn_calibrator: bool = False,\n n_samples_bn_calibrator: int = 128,\n ):\n super().__init__(\n obj_func, prompt_dataset, logger, use_bn_calibrator, n_samples_bn_calibrator\n )\n self.vocab_id = vocab_id\n self.crossover_tokenizer = crossover_tokenizer\n self.str_len = str_len\n self.n_classes = n_classes\n self.eval_batch_size = eval_batch_size\n\n def train(self, train_data):\n premise_texts, hypothesis_texts, class_labels = self.prompt_dataset.get_data(\n train_data\n )\n prompt = \"\"\n candidate_strs = [\n self.crossover_tokenizer.decode([d], skip_special_tokens=True)\n for d in self.vocab_id\n ]\n for _ in range(self.str_len):\n pop = [prompt + candidate_str for candidate_str in candidate_strs]\n # Evaluate the reward of all pop\n reward = (\n self.obj_func.forward(\n premise_texts,\n hypothesis_texts,\n class_labels,\n pop,\n True,\n \"infer\",\n verbose=False,\n )[0]\n .detach()\n .cpu()\n .numpy()\n )\n best_reward_idx = np.argmax(reward)\n if not prompt:\n prompt = candidate_strs[best_reward_idx]\n else:\n prompt += candidate_strs[best_reward_idx]\n print(f\"Current reward = {reward[best_reward_idx]}. Best prompt = {prompt}\")\n return [prompt]" } ]
import random import numpy as np import json import argparse import os import torch import logging from tqdm import tqdm from transformers import AutoTokenizer, set_seed from rewards.text_classification_reward import PromptedClassificationReward from utils.fsc_datasets import PromptedClassificationDataset from algs.genetics import GeneticAlgorithmTrainer, Genetics from algs.particle_swarm import ParticleSwarmOptimizer from algs.greedy import GreedyTrainer
13,773
logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) def remove_special_token(text: str, special_token: str) -> str: return text.replace(special_token, "") def find_kl_dict(args, data, vocab, obj_func, prompted_dataset): premise_texts, hypothesis_texts, class_labels = prompted_dataset.get_data(data) if args["prune_type"] == "kl": default_probs = obj_func.compute_default_kl( premise_texts, hypothesis_texts, class_labels, "", True ) else: default_probs = obj_func.compute_default_reward( premise_texts, hypothesis_texts, class_labels, "", True ) collect_kl = [] kl_dict = {} for v, k in tqdm(vocab.items()): if args["prune_type"] == "kl": kl = obj_func.compute_kl( premise_texts, hypothesis_texts, class_labels, v, True, default_probs ) else: kl = obj_func.compute_reward_diff( premise_texts, hypothesis_texts, class_labels, v, True, default_probs ) collect_kl.append(kl) kl_dict[v] = kl for k, v in kl_dict.items(): kl_dict[k] = float(v) with open(args["dict_path"], "w") as fp: json.dump(kl_dict, fp, indent=4, ensure_ascii=False) collect_kl_np = [] for tensor in collect_kl: collect_kl_np.append(tensor.cpu().numpy()) return kl_dict, collect_kl_np def load_kl_dict(args): # load the KL dict from json file with open(args["dict_path"], "r") as fp: kl_dict = json.load(fp) collect_kl_np = [] for k, v in kl_dict.items(): collect_kl_np.append(v) return kl_dict, collect_kl_np def load_vocab(args): with open(args["vocab_path"], "r") as fp: vocab = json.load(fp) vocab_key = [] vocab_id = [] for k, v in vocab.items(): vocab_key.append(k) vocab_id.append(v) return vocab, vocab_key, vocab_id def action_set_pruning(args, kl_dict, collect_kl_np, vocab): if not args["random_prune"]: collect_kl_np = np.array(collect_kl_np) top_10_percent = np.percentile(collect_kl_np, args["percentile"]) # filter the vocab based on the top_10_percent_idx new_vocab = { word: vocab[word] for word, value in kl_dict.items() if value > top_10_percent } vocab = new_vocab vocab_key = [] vocab_id = [] for k, v in vocab.items(): vocab_key.append(k) vocab_id.append(v) logger.info(len(vocab_key)) else: # random select 10% of the vocab vocab, vocab_key, vocab_id = random_pruning(args, vocab, args["percentile"]) logger.info(len(vocab_key)) return vocab, vocab_key, vocab_id def random_pruning(args, vocab: dict, percent: int = 99): vocab_key = [] vocab_id = [] for k, v in vocab.items(): vocab_key.append(k) vocab_id.append(v) length = int(len(vocab_key) * (100 - percent) / 100) pruned_index = random.sample(list(np.arange(len(vocab_key))), length) vocab_key = [vocab_key[i] for i in pruned_index] vocab_id = [vocab_id[i] for i in pruned_index] vocab = {vocab_key[i]: vocab_id[i] for i in range(len(vocab_key))} logger.info(len(vocab_key)) return vocab, vocab_key, vocab_id def main(args): print(args) set_seed(args["seed"]) revocab_flag = args["reprune_vocab"] shots = args["num_shots"] batch_size = args["train_batch_size"] args["is_mask_lm"] = False special_space = "▁" if "bert" in args["model_name"]: args["is_mask_lm"] = True special_space = "Ġ" logging.info("......Loading dataset......")
logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) def remove_special_token(text: str, special_token: str) -> str: return text.replace(special_token, "") def find_kl_dict(args, data, vocab, obj_func, prompted_dataset): premise_texts, hypothesis_texts, class_labels = prompted_dataset.get_data(data) if args["prune_type"] == "kl": default_probs = obj_func.compute_default_kl( premise_texts, hypothesis_texts, class_labels, "", True ) else: default_probs = obj_func.compute_default_reward( premise_texts, hypothesis_texts, class_labels, "", True ) collect_kl = [] kl_dict = {} for v, k in tqdm(vocab.items()): if args["prune_type"] == "kl": kl = obj_func.compute_kl( premise_texts, hypothesis_texts, class_labels, v, True, default_probs ) else: kl = obj_func.compute_reward_diff( premise_texts, hypothesis_texts, class_labels, v, True, default_probs ) collect_kl.append(kl) kl_dict[v] = kl for k, v in kl_dict.items(): kl_dict[k] = float(v) with open(args["dict_path"], "w") as fp: json.dump(kl_dict, fp, indent=4, ensure_ascii=False) collect_kl_np = [] for tensor in collect_kl: collect_kl_np.append(tensor.cpu().numpy()) return kl_dict, collect_kl_np def load_kl_dict(args): # load the KL dict from json file with open(args["dict_path"], "r") as fp: kl_dict = json.load(fp) collect_kl_np = [] for k, v in kl_dict.items(): collect_kl_np.append(v) return kl_dict, collect_kl_np def load_vocab(args): with open(args["vocab_path"], "r") as fp: vocab = json.load(fp) vocab_key = [] vocab_id = [] for k, v in vocab.items(): vocab_key.append(k) vocab_id.append(v) return vocab, vocab_key, vocab_id def action_set_pruning(args, kl_dict, collect_kl_np, vocab): if not args["random_prune"]: collect_kl_np = np.array(collect_kl_np) top_10_percent = np.percentile(collect_kl_np, args["percentile"]) # filter the vocab based on the top_10_percent_idx new_vocab = { word: vocab[word] for word, value in kl_dict.items() if value > top_10_percent } vocab = new_vocab vocab_key = [] vocab_id = [] for k, v in vocab.items(): vocab_key.append(k) vocab_id.append(v) logger.info(len(vocab_key)) else: # random select 10% of the vocab vocab, vocab_key, vocab_id = random_pruning(args, vocab, args["percentile"]) logger.info(len(vocab_key)) return vocab, vocab_key, vocab_id def random_pruning(args, vocab: dict, percent: int = 99): vocab_key = [] vocab_id = [] for k, v in vocab.items(): vocab_key.append(k) vocab_id.append(v) length = int(len(vocab_key) * (100 - percent) / 100) pruned_index = random.sample(list(np.arange(len(vocab_key))), length) vocab_key = [vocab_key[i] for i in pruned_index] vocab_id = [vocab_id[i] for i in pruned_index] vocab = {vocab_key[i]: vocab_id[i] for i in range(len(vocab_key))} logger.info(len(vocab_key)) return vocab, vocab_key, vocab_id def main(args): print(args) set_seed(args["seed"]) revocab_flag = args["reprune_vocab"] shots = args["num_shots"] batch_size = args["train_batch_size"] args["is_mask_lm"] = False special_space = "▁" if "bert" in args["model_name"]: args["is_mask_lm"] = True special_space = "Ġ" logging.info("......Loading dataset......")
prompt_dataset = PromptedClassificationDataset(args)
1
2023-10-08 12:39:44+00:00
16k
clessig/atmorep
atmorep/core/atmorep_model.py
[ { "identifier": "identity", "path": "atmorep/utils/utils.py", "snippet": "def identity( func, *args) :\n return func( *args)" }, { "identifier": "NetMode", "path": "atmorep/utils/utils.py", "snippet": "class NetMode( Enum) :\n indeterminate = 0\n train = 1\n test = 2" }, { "identifier": "get_model_filename", "path": "atmorep/utils/utils.py", "snippet": "def get_model_filename( model = None, model_id = '', epoch=-2, with_model_path = True) :\n\n if isinstance( model, str) :\n name = model \n elif model :\n name = model.__class__.__name__\n else : # backward compatibility\n name = 'mod'\n\n mpath = 'id{}'.format(model_id) if with_model_path else ''\n\n if epoch > -2 :\n # model_file = Path( config.path_results, 'models/id{}/{}_id{}_epoch{}.mod'.format(\n # model_id, name, model_id, epoch))\n model_file = Path( config.path_models, mpath, '{}_id{}_epoch{}.mod'.format(\n name, model_id, epoch))\n else :\n model_file = Path( config.path_models, mpath, '{}_id{}.mod'.format( name, model_id))\n \n return model_file" }, { "identifier": "prepare_token", "path": "atmorep/transformer/transformer_base.py", "snippet": "def prepare_token( xin, embed, embed_token_info, with_cls = True) :\n\n (token_seq, token_info) = xin\n num_tokens = token_seq.shape[-6:-3]\n num_levels = token_seq.shape[1]\n\n # embedding, flatten along token dimension and spatial dimensions\n token_seq_embed = embed( torch.flatten( torch.flatten( token_seq, -3, -1), -3, -2) )\n \n # add auxiliary, global token information\n token_info = embed_token_info( token_info).to( token_seq_embed.device, non_blocking=True )\n # token_info = prepare_token_info( cf, token_info)\n token_info = token_info.reshape([-1] + list(token_seq_embed.shape[1:-1])+[token_info.shape[-1]])\n token_seq_embed = torch.cat( [token_seq_embed, token_info], -1)\n\n # class token\n if with_cls :\n # initialize to zero (mean of data)\n tts = token_seq_embed.shape\n cls_token = torch.zeros( (tts[0], 1, tts[2]), device=token_seq_embed.device)\n \n # add positional encoding\n token_seq_embed = positional_encoding_harmonic( token_seq_embed, num_levels, num_tokens)\n\n # add class token after positional encoding\n if with_cls :\n token_seq_embed = torch.cat( [ cls_token, token_seq_embed ], 1)\n\n return token_seq_embed" }, { "identifier": "checkpoint_wrapper", "path": "atmorep/transformer/transformer_base.py", "snippet": "def checkpoint_wrapper( cmodule, *kwargs) :\n if cmodule.training :\n return torch.utils.checkpoint.checkpoint( cmodule, *kwargs, use_reentrant=False)\n else :\n return cmodule(*kwargs)" }, { "identifier": "MultifieldDataSampler", "path": "atmorep/datasets/multifield_data_sampler.py", "snippet": "class MultifieldDataSampler( torch.utils.data.IterableDataset):\n \n ###################################################\n def __init__( self, file_path, years_data, fields, batch_size, \n num_t_samples, num_patches_per_t, num_load, pre_batch, \n rng_seed = None, file_shape = (-1, 721, 1440),\n level_type = 'ml', time_sampling = 1, \n smoothing = 0, file_format = 'grib', month = None, lat_sampling_weighted = True,\n geo_range = [[-90.,90.], [0.,360.]], \n fields_targets = [], pre_batch_targets = None\n ) :\n '''\n Data set for single dynamic field at an arbitrary number of vertical levels\n '''\n super( MultifieldDataSampler).__init__()\n\n self.fields = fields\n self.batch_size = batch_size\n\n self.pre_batch = pre_batch\n\n self.years_data = years_data\n self.time_sampling = time_sampling\n self.month = month\n self.range_lat = 90. 
- np.array( geo_range[0])\n self.range_lon = np.array( geo_range[1])\n self.geo_range = geo_range\n\n # order North to South\n self.range_lat = np.flip(self.range_lat) if self.range_lat[1] < self.range_lat[0] \\\n else self.range_lat\n\n # prepare range_lat and range_lon for sampling\n self.is_global = 0 == self.range_lat[0] and self.range_lon[0] == 0. \\\n and 180. == self.range_lat[1] and 360. == self.range_lon[1]\n \n # TODO: this assumes file_shape is set correctly and not just per field and it defines a \n # reference grid, likely has to be the coarsest\n self.res = 360. / file_shape[2]\n \n # avoid wrap around at poles\n pole_offset = np.ceil(fields[0][3][1] * fields[0][4][1] / 2) * self.res\n self.range_lat[0] = pole_offset if self.range_lat[0] < pole_offset else self.range_lat[0]\n self.range_lat[1] =180.-pole_offset if 180.-self.range_lat[1]<pole_offset else self.range_lat[1]\n\n self.lat_sampling_weighted = lat_sampling_weighted\n\n self.level_type = level_type\n self.smoothing = smoothing\n\n self.file_path = config.path_data\n self.file_shape = file_shape\n self.file_format = file_format\n self.num_load = num_load\n self.num_patches_per_t = int(num_patches_per_t)\n self.num_t_samples = int(num_t_samples)\n\n self.fields_targets = fields_targets\n self.pre_batch_targets = pre_batch_targets\n\n # convert to mathematical latitude and ensure North -> South ordering\n # shrink so that cookie cutting based on sampling does not exceed domain if it is not global\n if not self.is_global :\n # TODO: check that field data is consistent and covers the same spatial domain \n # TODO: code below assumes that fields[0] is global\n # TODO: code below does not handle anisotropic grids\n finfo = self.fields[0]\n # ensure that delta is a multiple of the coarse grid resolution\n ngrid1 = finfo[3][1] * finfo[4][1]\n ngrid2 = finfo[3][2] * finfo[4][2]\n delta1 = 0.5 * self.res * (ngrid1-1 if ngrid1 % 2==0 else ngrid1+1)\n delta2 = 0.5 * self.res * (ngrid2-1 if ngrid2 % 2==0 else ngrid2+1)\n self.range_lat += np.array([delta1, -delta1])\n self.range_lon += np.array([delta2, -delta2])\n\n # ensure all data loaders use same rng_seed and hence generate consistent data\n if not rng_seed :\n rng_seed = np.random.randint( 0, 100000, 1)[0]\n self.rng = np.random.default_rng( rng_seed)\n\n # create (source) fields\n self.datasets = self.create_loaders( fields)\n\n # create (target) fields \n self.datasets_targets = self.create_loaders( fields_targets)\n\n ###################################################\n def create_loaders( self, fields ) :\n\n datasets = []\n for field_idx, field_info in enumerate(fields) :\n\n datasets.append( [])\n\n # extract field info\n (vls, num_tokens, token_size) = field_info[2:5]\n\n if len(field_info) > 6 :\n corr_type = field_info[6]\n else:\n corr_type = 'global'\n\n smoothing = self.smoothing\n log_transform_data = False\n if len(field_info) > 7 :\n (data_type, file_shape, file_geo_range, file_format) = field_info[7][:4]\n if len( field_info[7]) > 6 :\n smoothing = field_info[7][6]\n print( '{} : smoothing = {}'.format( field_info[0], smoothing) )\n if len( field_info[7]) > 7 :\n log_transform_data = field_info[7][7]\n print( '{} : log_transform_data = {}'.format( field_info[0], log_transform_data) )\n else :\n data_type = 'era5'\n file_format = self.file_format\n file_shape = self.file_shape\n file_geo_range = [[90.,-90.], [0.,360.]]\n\n # static fields\n if 0 == field_info[1][0] :\n datasets[-1].append( StaticField( self.file_path, field_info, self.batch_size, data_type,\n 
file_shape, file_geo_range,\n num_tokens, token_size, smoothing, file_format, corr_type) )\n \n # dynamic fields\n elif 1 == field_info[1][0] :\n for vlevel in vls :\n datasets[-1].append( DynamicFieldLevel( self.file_path, self.years_data, field_info,\n self.batch_size, data_type,\n file_shape, file_geo_range,\n num_tokens, token_size,\n self.level_type, vlevel, self.time_sampling, \n smoothing, file_format, corr_type, \n log_transform_data ) )\n \n else :\n assert False\n\n return datasets \n\n ###################################################\n def shuffle( self) :\n\n # ensure that different parallel loaders create independent random shuffles\n delta = torch.randint( 0, 100000, (1,)).item()\n self.rng.bit_generator.advance( delta)\n\n self.idxs_perm = np.zeros( (0, 4), dtype=np.int64)\n\n # latitude, first map to mathematical lat coords in [0,180.], then to [0,pi] then\n # to z-value in [-1,1]\n if self.lat_sampling_weighted :\n lat_r = np.cos( self.range_lat/180. * np.pi)\n else :\n lat_r = self.range_lat\n\n # 1.00001 is a fudge factor since np.round(*.5) leads to flooring instead of proper up-rounding\n res_inv = 1.0 / self.res * 1.00001\n\n # loop over individual data year-month items \n for i_ym in range( len(self.years_months)) :\n \n ym = self.years_months[i_ym]\n \n # ensure a constant size of work load of data loader independent of the month length \n # factor of 128 is a fudge parameter to ensure that mod-ing leads to sufficiently \n # random wrap-around (with 1 instead of 128 there is clustering on the first days)\n hours_in_day = int( 24 / self.time_sampling)\n time_slices = 128 * 31 * hours_in_day\n time_slices_i_ym = hours_in_day * days_in_month( ym[0], ym[1])\n idxs_perm_temp = np.mod(self.rng.permutation(time_slices), time_slices_i_ym)\n # fixed number of time samples independent of length of month\n idxs_perm_temp = idxs_perm_temp[:self.num_t_samples]\n idxs_perm = np.zeros( (self.num_patches_per_t *idxs_perm_temp.shape[0],4) )\n\n # split up into file index and local index\n idx = 0\n for it in idxs_perm_temp :\n \n idx_patches = self.rng.random( (self.num_patches_per_t, 2) )\n # for jj in idx_patches :\n for jj in idx_patches :\n # area consistent sampling on the sphere (with less patches close to the pole)\n # see https://graphics.stanford.edu/courses/cs448-97-fall/notes.html , Lecture 7\n # for area preserving sampling of the sphere\n # py \\in [0,180], px \\in [0,360] (possibly with negative values for lon)\n if self.lat_sampling_weighted :\n py = ((np.arccos(lat_r[0] + (lat_r[1]-lat_r[0]) * jj[0]) / np.pi) * 180.)\n else :\n py = (lat_r[0] + (lat_r[1]-lat_r[0]) * jj[0])\n px = jj[1] * (self.range_lon[1] - self.range_lon[0]) + self.range_lon[0]\n\n # align with grid\n py = self.res * np.round( py * res_inv)\n px = self.res * np.round( px * res_inv)\n\n idxs_perm[idx] = np.array( [i_ym, it, py, px])\n idx = idx + 1\n\n self.idxs_perm = np.concatenate( (self.idxs_perm, idxs_perm[:idx]))\n\n # shuffle again to avoid clustering of patches by loop over idx_patches above\n self.idxs_perm = self.idxs_perm[self.rng.permutation(self.idxs_perm.shape[0])]\n self.idxs_perm = self.idxs_perm[self.rng.permutation(self.idxs_perm.shape[0])]\n # restrict to multiples of batch size\n lenbatch = int(math.floor(self.idxs_perm.shape[0] / self.batch_size)) * self.batch_size\n self.idxs_perm = self.idxs_perm[:lenbatch]\n # # DEBUG\n # print( 'self.idxs_perm.shape = {}'.format(self.idxs_perm.shape ))\n # rank = torch.distributed.get_rank()\n # fname = 'idxs_perm_rank{}_{}.dat'.format( 
rank, shape_to_str( self.idxs_perm.shape))\n # self.idxs_perm.tofile( fname)\n\n ###################################################\n def set_full_time_range( self) :\n\n self.idxs_perm = np.zeros( (0, 4), dtype=np.int64)\n\n # latitude, first map to mathematical lat coords in [0,180.], then to [0,pi] then\n # to z-value in [-1,1]\n if self.lat_sampling_weighted :\n lat_r = np.cos( self.range_lat/180. * np.pi)\n else :\n lat_r = self.range_lat\n\n # 1.00001 is a fudge factor since np.round(*.5) leads to flooring instead of proper up-rounding\n res_inv = 1.0 / self.res * 1.00001\n\n # loop over individual data year-month items \n for i_ym in range( len(self.years_months)) :\n\n ym = self.years_months[i_ym]\n\n hours_in_day = int( 24 / self.time_sampling)\n idxs_perm_temp = np.arange( hours_in_day * days_in_month( ym[0], ym[1]))\n idxs_perm = np.zeros( (self.num_patches_per_t *idxs_perm_temp.shape[0],4) )\n\n # split up into file index and local index\n idx = 0\n for it in idxs_perm_temp :\n\n idx_patches = self.rng.random( (self.num_patches_per_t, 2) )\n for jj in idx_patches :\n # area consistent sampling on the sphere (with less patches close to the pole)\n # see https://graphics.stanford.edu/courses/cs448-97-fall/notes.html , Lecture 7\n # for area preserving sampling of the sphere\n # py \\in [0,180], px \\in [0,360] (possibly with negative values for lon)\n if self.lat_sampling_weighted :\n py = ((np.arccos(lat_r[0] + (lat_r[1]-lat_r[0]) * jj[0]) / np.pi) * 180.)\n else :\n py = (lat_r[0] + (lat_r[1]-lat_r[0]) * jj[0])\n px = jj[1] * (self.range_lon[1] - self.range_lon[0]) + self.range_lon[0]\n\n # align with grid\n py = self.res * np.round( py * res_inv)\n px = self.res * np.round( px * res_inv)\n\n idxs_perm[idx] = np.array( [i_ym, it, py, px])\n idx = idx + 1\n\n self.idxs_perm = np.concatenate( (self.idxs_perm, idxs_perm[:idx]))\n\n # shuffle again to avoid clustering of patches by loop over idx_patches above\n self.idxs_perm = self.idxs_perm[self.rng.permutation(self.idxs_perm.shape[0])]\n # restrict to multiples of batch size\n lenbatch = int(math.floor(self.idxs_perm.shape[0] / self.batch_size)) * self.batch_size\n self.idxs_perm = self.idxs_perm[:lenbatch]\n\n # # DEBUG\n # print( 'self.idxs_perm.shape = {}'.format(self.idxs_perm.shape ))\n # fname = 'idxs_perm_{}_{}.dat'.format( self.epoch_counter, shape_to_str( self.idxs_perm.shape))\n # self.idxs_perm.tofile( fname)\n\n ###################################################\n def load_data( self, batch_size = None) :\n\n years_data = self.years_data\n \n # ensure proper separation of different random samplers\n delta = torch.randint( 0, 1000, (1,)).item()\n self.rng.bit_generator.advance( delta)\n\n # select num_load random months and years \n perms = np.concatenate( [self.rng.permutation( np.arange(len(years_data))) for i in range(64)])\n perms = perms[:self.num_load]\n if self.month : \n self.years_months = [ (years_data[iyear], self.month) for iyear in perms]\n else : \n # stratified sampling of month to ensure proper distribution, needs to be adapted for \n # number of parallel workers not being divisible by 4\n # rank, ms = torch.distributed.get_rank() % 4, 3\n # perms_m = np.concatenate( [self.rng.permutation( np.arange( rank*ms+1, (rank+1)*ms+1))\n # for i in range(16)])\n perms_m = np.concatenate( [self.rng.permutation( np.arange( 1, 12+1)) for i in range(16)])\n self.years_months = [ ( years_data[iyear], perms_m[i]) for i,iyear in enumerate(perms)]\n\n # generate random permutations passed to the loaders for 
individual files \n # to ensure consistent processing\n self.shuffle()\n\n # perform actual loading of data\n \n for ds_field in self.datasets :\n for ds in ds_field :\n ds.load_data( self.years_months, self.idxs_perm, batch_size)\n\n for ds_field in self.datasets_targets :\n for ds in ds_field :\n ds.load_data( self.years_months, self.idxs_perm, batch_size)\n\n ###################################################\n def set_data( self, times_pos, batch_size = None) :\n '''\n times_pos = np.array( [ [year, month, day, hour, lat, lon], ...] )\n - lat \\in [90,-90] = [90N, 90S]\n - lon \\in [0,360]\n - (year,month) pairs should be a limited number since all data for these is loaded\n '''\n\n # extract required years and months\n years_months_all = np.array( [ [it[0], it[1]] for it in times_pos ], dtype=np.int64)\n self.years_months = list( zip( np.unique(years_months_all[:,0]),\n np.unique( years_months_all[:,1] )))\n\n # generate all the data\n self.idxs_perm = np.zeros( (len(times_pos), 4))\n for idx, item in enumerate( times_pos) :\n\n assert item[2] >= 1 and item[2] <= 31\n assert item[3] >= 0 and item[3] < int(24 / self.time_sampling)\n assert item[4] >= -90. and item[4] <= 90.\n\n # find year\n for i_ym, ym in enumerate( self.years_months) :\n if ym[0] == item[0] and ym[1] == item[1] :\n break\n\n # last term: correct for window from last file that is loaded\n it = (item[2] - 1) * (24./self.time_sampling) + item[3]\n # it = item[2] * (24./self.time_sampling) + item[3]\n idx_lat = item[4]\n idx_lon = item[5]\n\n # work with mathematical lat coordinates from here on\n self.idxs_perm[idx] = np.array( [i_ym, it, 90. - idx_lat, idx_lon])\n\n for ds_field in self.datasets :\n for ds in ds_field :\n ds.load_data( self.years_months, self.idxs_perm, batch_size)\n\n for ds_field in self.datasets_targets :\n for ds in ds_field :\n ds.load_data( self.years_months, self.idxs_perm, batch_size)\n\n ###################################################\n def set_global( self, times, batch_size = None, token_overlap = [0, 0]) :\n ''' generate patch/token positions for global grid '''\n\n token_overlap = torch.tensor( token_overlap).to(torch.int64)\n\n # assumed that sanity checking that field data is consistent has been done \n ifield = 0\n field = self.fields[ifield]\n\n res = self.res\n side_len = torch.tensor( [field[3][1] * field[4][1] * res, field[3][2] * field[4][2] * res] )\n overlap = torch.tensor( [token_overlap[0]*field[4][1]*res, token_overlap[1]*field[4][2]*res] )\n side_len_2 = side_len / 2.\n assert all( overlap <= side_len_2), 'token_overlap too large for #tokens, reduce if possible'\n\n # generate tiles\n times_pos = []\n for ctime in times :\n\n lat = side_len_2[0].item()\n num_tiles_lat = 0\n while (lat + side_len_2[0].item()) < 180. :\n num_tiles_lat += 1\n lon = side_len_2[1].item() - overlap[1].item()/2.\n num_tiles_lon = 0\n while (lon - side_len_2[1]) < 360. :\n times_pos += [[*ctime, -lat + 90., np.mod(lon,360.) ]]\n lon += side_len[1].item() - overlap[1].item()\n num_tiles_lon += 1\n lat += side_len[0].item() - overlap[0].item()\n\n # add one additional row if no perfect tiling (sphere is toric in longitude so no special\n # handling necessary but not in latitude)\n # the added row is such that it goes exaclty down to the South pole and the offset North-wards\n # is computed based on this\n lat -= side_len[0] - overlap[0]\n if lat - side_len_2[0] < 180. :\n num_tiles_lat += 1\n lat = 180. 
- side_len_2[0].item() + res\n lon = side_len_2[1].item() - overlap[1].item()/2.\n while (lon - side_len_2[1]) < 360. :\n times_pos += [[*ctime, -lat + 90., np.mod(lon,360.) ]]\n lon += side_len[1].item() - overlap[1].item()\n\n # adjust batch size if necessary so that the evaluations split up across batches of equal size\n batch_size = num_tiles_lon\n\n print( 'Number of batches per global forecast: {}'.format( num_tiles_lat) )\n\n self.set_data( times_pos, batch_size)\n\n ###################################################\n def set_location( self, pos, years, months, num_t_samples_per_month, batch_size = None) :\n ''' random time sampling for fixed location '''\n\n times_pos = []\n for i_ym, ym in enumerate(itertools.product( years, months )) :\n\n # ensure a constant size of work load of data loader independent of the month length \n # factor of 128 is a fudge parameter to ensure that mod-ing leads to sufficiently \n # random wrap-around (with 1 instead of 128 there is clustering on the first days)\n hours_in_day = int( 24 / self.time_sampling)\n d_i_m = days_in_month( ym[0], ym[1]) \n perms = self.rng.permutation( num_t_samples_per_month * d_i_m)\n # ensure that days start at 1\n perms = np.mod( perms[ : num_t_samples_per_month], (d_i_m-1) ) + 1\n rhs = self.rng.integers(low=0, high=hours_in_day, size=num_t_samples_per_month )\n\n for rh, perm in zip( rhs, perms) :\n times_pos += [[ ym[0], ym[1], perm, rh, pos[0], pos[1]] ]\n\n # adjust batch size if necessary so that the evaluations split up across batches of equal size\n while 0 != (len(times_pos) % batch_size) :\n batch_size -= 1\n assert batch_size >= 1\n\n self.set_data( times_pos, batch_size)\n\n ###################################################\n def __iter__(self):\n\n iter_start, iter_end = self.worker_workset()\n\n for bidx in range( iter_start, iter_end) :\n\n sources = []\n for ds_field in self.datasets : \n sources.append( [ds_level[bidx] for ds_level in ds_field])\n # perform batch pre-processing, e.g. BERT-type masking\n if self.pre_batch :\n sources = self.pre_batch( sources)\n\n targets = []\n for ds_field in self.datasets_targets :\n targets.append( [ds_level[bidx] for ds_level in ds_field])\n # perform batch pre-processing, e.g. 
BERT-type masking\n if self.pre_batch_targets :\n targets = self.pre_batch_targets( targets)\n\n yield (sources,targets)\n\n ###################################################\n def __len__(self):\n return len(self.datasets[0][0])\n\n ###################################################\n def worker_workset( self) :\n\n worker_info = torch.utils.data.get_worker_info()\n\n if worker_info is None: \n iter_start = 0\n iter_end = len(self.datasets[0][0])\n\n else: \n # split workload\n temp = len(self.datasets[0][0])\n per_worker = int( np.floor( temp / float(worker_info.num_workers) ) )\n worker_id = worker_info.id\n iter_start = int(worker_id * per_worker)\n iter_end = int(iter_start + per_worker)\n if worker_info.id+1 == worker_info.num_workers :\n iter_end = int(temp)\n\n return iter_start, iter_end" }, { "identifier": "TransformerEncoder", "path": "atmorep/transformer/transformer_encoder.py", "snippet": "class TransformerEncoder(torch.nn.Module) :\n\n def __init__(self, cf, field_idx, with_embed = True):\n ''' '''\n \n super(TransformerEncoder, self).__init__()\n\n self.cf = cf\n self.field_idx = field_idx\n self.with_embed = with_embed\n\n ###################################\n def create( self) :\n\n cf = self.cf\n with_ln = cf.with_layernorm\n\n self.fields_index = {}\n for ifield, field_info in enumerate(cf.fields) :\n self.fields_index[ field_info[0] ] = ifield \n\n field_info = cf.fields[self.field_idx]\n \n # learnable linear embedding\n if self.with_embed :\n net_dim_input = np.prod(field_info[4]) \n self.embed = torch.nn.Linear( net_dim_input, field_info[1][1]- cf.size_token_info_net)\n\n # num_heads_coupling\n dor = cf.dropout_rate\n self.heads = torch.nn.ModuleList()\n self.mlps = torch.nn.ModuleList()\n for il in range( cf.encoder_num_layers) :\n\n nhc = cf.coupling_num_heads_per_field * len( field_info[1][2])\n # nhs = cf.encoder_num_heads - nhc\n nhs = cf.encoder_num_heads\n # number of tokens\n n_toks = torch.tensor( field_info[3], dtype=torch.int64)\n \n dims_embed = [ field_info[1][1] ]\n vl_num_tokens = [len(field_info[2])] + field_info[3]\n for field_coupled in field_info[1][2] : \n if 'axial' in cf.encoder_att_type :\n finfo_other = cf.fields[ self.fields_index[field_coupled] ]\n dims_embed.append( finfo_other[1][1] )\n vl_num_tokens.append( [len(finfo_other[2])] + finfo_other[3] )\n else : \n for _ in range(cf.coupling_num_heads_per_field) :\n finfo_other = cf.fields[ self.fields_index[field_coupled] ]\n dims_embed.append( finfo_other[1][1] )\n vl_num_tokens.append( [len(finfo_other[2])] + finfo_other[3] )\n\n # attention heads\n if 'dense' == cf.encoder_att_type :\n head = MultiInterAttentionHead( nhs, nhc, dims_embed, with_ln, dor, cf.with_qk_lnorm, \n cf.grad_checkpointing, with_attention=cf.attention )\n elif 'axial' in cf.encoder_att_type :\n par = True if 'parallel' in cf.encoder_att_type else False\n head = MultiFieldAxialAttention( [3,2,1], dims_embed, nhs, nhc, par, dor)\n else :\n assert False, 'Unsupported attention type: ' + cf.decoder_att_type\n self.heads.append( head)\n # feature space mapping sub-block\n self.mlps.append( MLP( dims_embed[0], cf.encoder_num_mlp_layers, with_ln, dropout_rate=dor,\n grad_checkpointing = cf.grad_checkpointing))\n\n return self\n\n ###################################\n def forward(self, xin):\n ''' '''\n assert False" }, { "identifier": "TransformerDecoder", "path": "atmorep/transformer/transformer_decoder.py", "snippet": "class TransformerDecoder(torch.nn.Module) :\n\n ###################################\n def 
__init__(self, cf, field_info ):\n '''\n Vaswani transformer corresponds to self_att = True and cross_att_ratio = 1. *and* encoder_out \n passed to forward is the output of the encoder (duplicated to match the number of layers)\n '''\n super( TransformerDecoder, self).__init__()\n \n self.cf = cf\n self.num_layers = cf.decoder_num_layers\n self.dim_embed = field_info[1][1]\n\n # TODO: split up create() for consistency\n\n num_heads = cf.decoder_num_heads\n num_mlp_layers = cf.decoder_num_mlp_layers \n self_att = cf.decoder_self_att\n cross_att_ratio = cf.decoder_cross_att_ratio \n\n num_heads_other = int(num_heads * cross_att_ratio)\n num_heads_self = num_heads - num_heads_other\n\n dim_embed = self.dim_embed\n\n # first layers, potentially with U-Net type coupling\n self.blocks = torch.nn.ModuleList()\n for il in range( min( cf.encoder_num_layers, cf.decoder_num_layers) ) :\n\n # self attention sub-block (as in original Vaswani)\n if self_att :\n self.blocks.append( MultiSelfAttentionHead( dim_embed, num_heads, cf.dropout_rate, \n cf.decoder_att_type, cf.with_qk_lnorm,\n cf.grad_checkpointing) )\n # cross attention between encoder and decoder\n if 'dense' == cf.decoder_att_type :\n self.blocks.append( MultiCrossAttentionHead( dim_embed, num_heads_self, num_heads_other, \n cf.dropout_rate, cf.with_qk_lnorm,\n cf.grad_checkpointing, cf.attention ) )\n elif 'axial' in cf.decoder_att_type :\n par = True if 'parallel' in cf.encoder_att_type else False\n self.blocks.append( MultiFieldAxialAttention( [3,2,1], [dim_embed,dim_embed], \n num_heads_self, num_heads_other, par, cf.dropout_rate) )\n else :\n assert False, 'Unsupported attention type: ' + cf.decoder_att_type \n # feature space mapping sub-block\n self.blocks.append( MLP( dim_embed, num_mlp_layers, cf.with_layernorm, \n dropout_rate = cf.dropout_rate, \n grad_checkpointing = cf.grad_checkpointing) )\n\n # remaining strictly non-coupled layers (if decoder is deeper than the encoder)\n dim_embed = self.dim_embed\n for il in range( cf.encoder_num_layers, cf.decoder_num_layers) :\n self.blocks.append( MultiSelfAttentionHead( dim_embed, num_heads, cf.dropout_rate, \n cf.decoder_att_type, cf.with_qk_lnorm))\n self.blocks.append( MLP( dim_embed, num_mlp_layers, cf.with_layernorm,\n grad_checkpointing = cf.grad_checkpointing ))\n\n self.checkpoint = identity\n if cf.grad_checkpointing :\n self.checkpoint = checkpoint_wrapper\n\n ###################################\n def device( self):\n return next(self.parameters()).device\n\n ###################################\n def forward(self, x):\n '''Evaluate decoder'''\n\n dev = self.device()\n\n (decoder_in, encoder_out) = x\n encoder_out.reverse()\n \n token_seq_embed = decoder_in.to( dev, non_blocking=True)\n\n atts = []\n car = self.cf.decoder_cross_att_rate\n for il in range(self.num_layers) : \n token_seq_embed, att = self.checkpoint( self.blocks[2*il], token_seq_embed, \n encoder_out[int(car*il)].to(dev, non_blocking=True) )\n token_seq_embed = self.checkpoint( self.blocks[2*il+1], token_seq_embed, \n encoder_out[int(car*il)].to(dev, non_blocking=True) )\n atts += [ att ]\n\n return token_seq_embed, atts\n\n ###################################\n def get_attention( self, xin, iblock) :\n ''' \n Get attention and projected values from specific layer and her head\n '''\n\n # assert False\n print(\"inside get_attention in transformer_decoder.py\")\n # embedding\n token_seq_embed = decoder_in.to( dev, non_blocking=True)\n car = self.cf.decoder_cross_att_rate\n for il in range(self.num_layers) :\n 
token_seq_embed = self.checkpoint( self.blocks[2*il], token_seq_embed, \n encoder_out[int(car*il)].to(dev, non_blocking=True) )\n \n atts = self.blocks[2*il].get_attention( token_seq_embed )\n\n return atts #(atts, vsh)" }, { "identifier": "TailEnsemble", "path": "atmorep/transformer/tail_ensemble.py", "snippet": "class TailEnsemble( torch.nn.Module) :\n\n def __init__( self, cf, dim_embed, dim_net_input, net_tail_num_nets = -1 ) :\n \n super( TailEnsemble, self).__init__()\n\n self.cf = cf\n self.dim_embed = dim_embed\n self.dim_input = dim_net_input\n self.net_tail_num_nets = net_tail_num_nets if net_tail_num_nets > 0 else cf.net_tail_num_nets\n\n ###################################################\n def create( self) :\n\n dim = self.dim_embed\n\n # tail networks: use class token to make prediction \n nonlin = torch.nn.GELU()\n self.tail_nets = torch.nn.ModuleList()\n for inet in range( self.net_tail_num_nets) :\n self.tail_nets.append( torch.nn.ModuleList())\n self.tail_nets[-1].append( torch.nn.LayerNorm( dim, elementwise_affine=True))\n for _ in range( self.cf.net_tail_num_layers) :\n self.tail_nets[-1].append( torch.nn.Linear( dim, dim, bias=True))\n self.tail_nets[-1].append( nonlin)\n # un-embedding layer\n self.tail_nets[-1].append( torch.nn.Linear( dim, self.dim_input, bias=True)) \n\n return self \n\n ###################################\n def device( self):\n return next(self.parameters()).device\n\n ###################################################\n def forward( self, rep ) :\n\n rep.to( self.device())\n\n # evaluate ensemble of tail networks\n preds = []\n for tail_net in self.tail_nets : \n cpred = rep\n for block in tail_net :\n cpred = block(cpred)\n preds.append( cpred.unsqueeze(1))\n preds = torch.cat( preds, 1)\n\n # # mean and variance of ensemble\n if 1 == len(self.tail_nets) : # avoid that std_dev is NaN with 1 \"ensemble\" member\n dev = preds.device\n pred = ( torch.mean(preds,1), torch.zeros( torch.std(preds,1).shape, device=dev ), preds )\n else :\n pred = ( torch.mean(preds,1), torch.std(preds,1), preds )\n\n return pred" } ]
import torch import numpy as np import code import atmorep.utils.utils as utils from atmorep.utils.utils import identity from atmorep.utils.utils import NetMode from atmorep.utils.utils import get_model_filename from atmorep.transformer.transformer_base import prepare_token from atmorep.transformer.transformer_base import checkpoint_wrapper from atmorep.datasets.multifield_data_sampler import MultifieldDataSampler from atmorep.transformer.transformer_encoder import TransformerEncoder from atmorep.transformer.transformer_decoder import TransformerDecoder from atmorep.transformer.tail_ensemble import TailEnsemble
11,795
def create( self, devices, load_pretrained=True) : '''Create network''' cf = self.cf self.devices = devices size_token_info = 6 self.fields_coupling_idx = [] self.fields_index = {} for ifield, field_info in enumerate(cf.fields) : self.fields_index[ field_info[0] ] = ifield # # embedding network for global/auxiliary token infos # TODO: only for backward compatibility, remove self.embed_token_info = torch.nn.Linear( cf.size_token_info, cf.size_token_info_net) torch.nn.init.constant_( self.embed_token_info.weight, 0.0) self.embeds_token_info = torch.nn.ModuleList() for ifield, field_info in enumerate( cf.fields) : self.embeds_token_info.append( torch.nn.Linear( cf.size_token_info, cf.size_token_info_net)) if len(field_info[1]) > 4 and load_pretrained : # TODO: inconsistent with embeds_token_info -> version that can handle both # we could imply use the file name: embed_token_info vs embeds_token_info name = 'AtmoRep' + '_embed_token_info' mloaded = torch.load( get_model_filename( name, field_info[1][4][0], field_info[1][4][1])) self.embeds_token_info[-1].load_state_dict( mloaded) print( 'Loaded embed_token_info from id = {}.'.format( field_info[1][4][0] ) ) else : # initalization torch.nn.init.constant_( self.embeds_token_info[-1].weight, 0.0) self.embeds_token_info[-1].bias.data.fill_(0.0) # embedding and encoder self.embeds = torch.nn.ModuleList() self.encoders = torch.nn.ModuleList() self.masks = torch.nn.ParameterList() for field_idx, field_info in enumerate(cf.fields) : # learnabl class token if cf.learnable_mask : mask = torch.nn.Parameter( 0.1 * torch.randn( np.prod( field_info[4]), requires_grad=True)) self.masks.append( mask.to(devices[0])) else : self.masks.append( None) # encoder self.encoders.append( TransformerEncoder( cf, field_idx, True).create()) # load pre-trained model if specified if len(field_info[1]) > 4 and load_pretrained : self.load_block( field_info, 'encoder', self.encoders[-1]) self.embeds.append( self.encoders[-1].embed) # indices of coupled fields for efficient access in forward self.fields_coupling_idx.append( [field_idx]) for field_coupled in field_info[1][2] : if 'axial' in cf.encoder_att_type : self.fields_coupling_idx[field_idx].append( self.fields_index[field_coupled] ) else : for _ in range(cf.coupling_num_heads_per_field) : self.fields_coupling_idx[field_idx].append( self.fields_index[field_coupled] ) # decoder self.decoders = torch.nn.ModuleList() self.field_pred_idxs = [] for field in cf.fields_prediction : for ifield, field_info in enumerate(cf.fields) : if field_info[0] == field[0] : self.field_pred_idxs.append( ifield) break self.decoders.append( TransformerDecoder( cf, field_info ) ) # load pre-trained model if specified if len(field_info[1]) > 4 and load_pretrained : self.load_block( field_info, 'decoder', self.decoders[-1]) # tail networks self.tails = torch.nn.ModuleList() for ifield, field in enumerate(cf.fields_prediction) : field_idx = self.field_pred_idxs[ifield] field_info = cf.fields[field_idx] self.tails.append( TailEnsemble( cf, field_info[1][1], np.prod(field_info[4]) ).create()) # load pre-trained model if specified if len(field_info[1]) > 4 and load_pretrained: self.load_block( field_info, 'tail', self.tails[-1]) # set devices for field_idx, field_info in enumerate(cf.fields) : # find determined device, use default if nothing specified device = self.devices[0] if len(field_info[1]) > 3 : assert field_info[1][3] < 4, 'Only single node model parallelism supported' assert field_info[1][3] < len(devices), 'Per field device id larger than max 
devices' device = self.devices[ field_info[1][3] ] # set device if self.masks[field_idx] != None : self.masks[field_idx].to(device) self.embeds[field_idx].to(device) self.encoders[field_idx].to(device) for field_idx, field in enumerate(cf.fields_prediction) : field_info = cf.fields[ self.field_pred_idxs[field_idx] ] device = self.devices[0] if len(field_info[1]) > 3 : device = self.devices[ field_info[1][3] ] self.decoders[field_idx].to(device) self.tails[field_idx].to(device) # embed_token_info on device[0] since it is shared by all fields, potentially sub-optimal self.embed_token_info.to(devices[0]) # TODO: only for backward compatibility, remove self.embeds_token_info.to(devices[0])
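The cropped `AtmoRep.create()` snippet above builds `fields_coupling_idx`, the per-field list of encoder indices each field cross-attends to. Below is a minimal standalone sketch of just that index construction; the list layout (`field_info[0]` is the name, `field_info[1][2]` lists coupled field names) follows the snippet, while the field names, head count, and attention type are invented for illustration.

```python
# Toy field specs mirroring the layout used in the snippet above.
fields = [
    ["temperature", [0, 0, ["velocity_u"]]],   # hypothetical field definitions
    ["velocity_u",  [0, 0, []]],
]
coupling_num_heads_per_field = 2
encoder_att_type = "dense"                     # anything without 'axial' repeats per head

fields_index = {field_info[0]: i for i, field_info in enumerate(fields)}

fields_coupling_idx = []
for field_idx, field_info in enumerate(fields):
    fields_coupling_idx.append([field_idx])    # every field attends to itself first
    for coupled_name in field_info[1][2]:
        if "axial" in encoder_att_type:
            fields_coupling_idx[field_idx].append(fields_index[coupled_name])
        else:
            # dense coupling: one index entry per coupling attention head
            for _ in range(coupling_num_heads_per_field):
                fields_coupling_idx[field_idx].append(fields_index[coupled_name])

print(fields_coupling_idx)                     # [[0, 1, 1], [1]]
```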
#################################################################################################### # # Copyright (C) 2022 # #################################################################################################### # # project : atmorep # # author : atmorep collaboration # # description : # # license : # #################################################################################################### # code.interact(local=locals()) # import horovod.torch as hvd #################################################################################################### class AtmoRepData( torch.nn.Module) : def __init__( self, net) : '''Wrapper class for AtmoRep that handles data loading''' super( AtmoRepData, self).__init__() self.data_loader_test = None self.data_loader_train = None self.data_loader_iter = None self.net = net # ensure that all data loaders have the same seed and hence load the same data self.rng_seed = net.cf.rng_seed if not self.rng_seed : self.rng_seed = int(torch.randint( 100000000, (1,))) ################################################### def load_data( self, mode : NetMode, batch_size = -1, num_loader_workers = -1) : '''Load data''' cf = self.net.cf if batch_size < 0 : batch_size = cf.batch_size_max if num_loader_workers < 0 : num_loader_workers = cf.num_loader_workers if mode == NetMode.train : self.data_loader_train = self._load_data( self.dataset_train, batch_size, num_loader_workers) elif mode == NetMode.test : batch_size = cf.batch_size_test self.data_loader_test = self._load_data( self.dataset_test, batch_size, num_loader_workers) else : assert False ################################################### def _load_data( self, dataset, batch_size, num_loader_workers) : '''Private implementation for load''' dataset.load_data( batch_size) loader_params = { 'batch_size': None, 'batch_sampler': None, 'shuffle': False, 'num_workers': num_loader_workers, 'pin_memory': True} data_loader = torch.utils.data.DataLoader( dataset, **loader_params, sampler = None) return data_loader ################################################### def set_data( self, mode : NetMode, times_pos, batch_size = -1, num_loader_workers = -1) : cf = self.net.cf if batch_size < 0 : batch_size = cf.batch_size_train if mode == NetMode.train else cf.batch_size_test dataset = self.dataset_train if mode == NetMode.train else self.dataset_test dataset.set_data( times_pos, batch_size) self._set_data( dataset, mode, batch_size, num_loader_workers) ################################################### def set_global( self, mode : NetMode, times, batch_size = -1, num_loader_workers = -1) : cf = self.net.cf if batch_size < 0 : batch_size = cf.batch_size_train if mode == NetMode.train else cf.batch_size_test dataset = self.dataset_train if mode == NetMode.train else self.dataset_test dataset.set_global( times, batch_size, cf.token_overlap) self._set_data( dataset, mode, batch_size, num_loader_workers) ################################################### def set_location( self, mode : NetMode, pos, years, months, num_t_samples_per_month, batch_size = -1, num_loader_workers = -1) : cf = self.net.cf if batch_size < 0 : batch_size = cf.batch_size_train if mode == NetMode.train else cf.batch_size_test dataset = self.dataset_train if mode == NetMode.train else self.dataset_test dataset.set_location( pos, years, months, num_t_samples_per_month, batch_size) self._set_data( dataset, mode, batch_size, num_loader_workers) ################################################### def _set_data( self, dataset, mode : NetMode, 
batch_size = -1, loader_workers = -1) : '''Private implementation for set_data, set_global''' cf = self.net.cf if loader_workers < 0 : loader_workers = cf.num_loader_workers loader_params = { 'batch_size': None, 'batch_sampler': None, 'shuffle': False, 'num_workers': loader_workers, 'pin_memory': True} if mode == NetMode.train : self.data_loader_train = torch.utils.data.DataLoader( dataset, **loader_params, sampler = None) elif mode == NetMode.test : self.data_loader_test = torch.utils.data.DataLoader( dataset, **loader_params, sampler = None) else : assert False ################################################### def normalizer( self, field, vl_idx) : if isinstance( field, str) : for fidx, field_info in enumerate(self.cf.fields) : if field == field_info[0] : break assert fidx < len(self.cf.fields), 'invalid field' normalizer = self.dataset_train.datasets[fidx].normalizer elif isinstance( field, int) : normalizer = self.dataset_train.datasets[field][vl_idx].normalizer else : assert False, 'invalid argument type (has to be index to cf.fields or field name)' return normalizer ################################################### def mode( self, mode : NetMode) : if mode == NetMode.train : self.data_loader_iter = iter(self.data_loader_train) self.net.train() elif mode == NetMode.test : self.data_loader_iter = iter(self.data_loader_test) self.net.eval() else : assert False self.cur_mode = mode ################################################### def len( self, mode : NetMode) : if mode == NetMode.train : return len(self.data_loader_train) elif mode == NetMode.test : return len(self.data_loader_test) else : assert False ################################################### def next( self) : return next(self.data_loader_iter) ################################################### def forward( self, xin) : pred = self.net.forward( xin) return pred ################################################### def get_attention( self, xin): #, field_idx) : attn = self.net.get_attention( xin) #, field_idx) return attn ################################################### def create( self, pre_batch, devices, create_net = True, pre_batch_targets = None, load_pretrained=True) : if create_net : self.net.create( devices, load_pretrained) self.pre_batch = pre_batch self.pre_batch_targets = pre_batch_targets cf = self.net.cf self.dataset_train = MultifieldDataSampler( cf.data_dir, cf.years_train, cf.fields, batch_size = cf.batch_size_start, num_t_samples = cf.num_t_samples, num_patches_per_t = cf.num_patches_per_t_train, num_load = cf.num_files_train, pre_batch = self.pre_batch, rng_seed = self.rng_seed, file_shape = cf.file_shape, smoothing = cf.data_smoothing, level_type = cf.level_type, file_format = cf.file_format, month = cf.month, time_sampling = cf.time_sampling, geo_range = cf.geo_range_sampling, fields_targets = cf.fields_targets, pre_batch_targets = self.pre_batch_targets ) self.dataset_test = MultifieldDataSampler( cf.data_dir, cf.years_test, cf.fields, batch_size = cf.batch_size_test, num_t_samples = cf.num_t_samples, num_patches_per_t = cf.num_patches_per_t_test, num_load = cf.num_files_test, pre_batch = self.pre_batch, rng_seed = self.rng_seed, file_shape = cf.file_shape, smoothing = cf.data_smoothing, level_type = cf.level_type, file_format = cf.file_format, month = cf.month, time_sampling = cf.time_sampling, geo_range = cf.geo_range_sampling, lat_sampling_weighted = cf.lat_sampling_weighted, fields_targets = cf.fields_targets, pre_batch_targets = self.pre_batch_targets ) return self 
#################################################################################################### class AtmoRep( torch.nn.Module) : def __init__(self, cf) : '''Constructor''' super( AtmoRep, self).__init__() self.cf = cf ################################################### def create( self, devices, load_pretrained=True) : '''Create network''' cf = self.cf self.devices = devices size_token_info = 6 self.fields_coupling_idx = [] self.fields_index = {} for ifield, field_info in enumerate(cf.fields) : self.fields_index[ field_info[0] ] = ifield # # embedding network for global/auxiliary token infos # TODO: only for backward compatibility, remove self.embed_token_info = torch.nn.Linear( cf.size_token_info, cf.size_token_info_net) torch.nn.init.constant_( self.embed_token_info.weight, 0.0) self.embeds_token_info = torch.nn.ModuleList() for ifield, field_info in enumerate( cf.fields) : self.embeds_token_info.append( torch.nn.Linear( cf.size_token_info, cf.size_token_info_net)) if len(field_info[1]) > 4 and load_pretrained : # TODO: inconsistent with embeds_token_info -> version that can handle both # we could imply use the file name: embed_token_info vs embeds_token_info name = 'AtmoRep' + '_embed_token_info' mloaded = torch.load( get_model_filename( name, field_info[1][4][0], field_info[1][4][1])) self.embeds_token_info[-1].load_state_dict( mloaded) print( 'Loaded embed_token_info from id = {}.'.format( field_info[1][4][0] ) ) else : # initalization torch.nn.init.constant_( self.embeds_token_info[-1].weight, 0.0) self.embeds_token_info[-1].bias.data.fill_(0.0) # embedding and encoder self.embeds = torch.nn.ModuleList() self.encoders = torch.nn.ModuleList() self.masks = torch.nn.ParameterList() for field_idx, field_info in enumerate(cf.fields) : # learnabl class token if cf.learnable_mask : mask = torch.nn.Parameter( 0.1 * torch.randn( np.prod( field_info[4]), requires_grad=True)) self.masks.append( mask.to(devices[0])) else : self.masks.append( None) # encoder self.encoders.append( TransformerEncoder( cf, field_idx, True).create()) # load pre-trained model if specified if len(field_info[1]) > 4 and load_pretrained : self.load_block( field_info, 'encoder', self.encoders[-1]) self.embeds.append( self.encoders[-1].embed) # indices of coupled fields for efficient access in forward self.fields_coupling_idx.append( [field_idx]) for field_coupled in field_info[1][2] : if 'axial' in cf.encoder_att_type : self.fields_coupling_idx[field_idx].append( self.fields_index[field_coupled] ) else : for _ in range(cf.coupling_num_heads_per_field) : self.fields_coupling_idx[field_idx].append( self.fields_index[field_coupled] ) # decoder self.decoders = torch.nn.ModuleList() self.field_pred_idxs = [] for field in cf.fields_prediction : for ifield, field_info in enumerate(cf.fields) : if field_info[0] == field[0] : self.field_pred_idxs.append( ifield) break self.decoders.append( TransformerDecoder( cf, field_info ) ) # load pre-trained model if specified if len(field_info[1]) > 4 and load_pretrained : self.load_block( field_info, 'decoder', self.decoders[-1]) # tail networks self.tails = torch.nn.ModuleList() for ifield, field in enumerate(cf.fields_prediction) : field_idx = self.field_pred_idxs[ifield] field_info = cf.fields[field_idx] self.tails.append( TailEnsemble( cf, field_info[1][1], np.prod(field_info[4]) ).create()) # load pre-trained model if specified if len(field_info[1]) > 4 and load_pretrained: self.load_block( field_info, 'tail', self.tails[-1]) # set devices for field_idx, field_info in 
enumerate(cf.fields) : # find determined device, use default if nothing specified device = self.devices[0] if len(field_info[1]) > 3 : assert field_info[1][3] < 4, 'Only single node model parallelism supported' assert field_info[1][3] < len(devices), 'Per field device id larger than max devices' device = self.devices[ field_info[1][3] ] # set device if self.masks[field_idx] != None : self.masks[field_idx].to(device) self.embeds[field_idx].to(device) self.encoders[field_idx].to(device) for field_idx, field in enumerate(cf.fields_prediction) : field_info = cf.fields[ self.field_pred_idxs[field_idx] ] device = self.devices[0] if len(field_info[1]) > 3 : device = self.devices[ field_info[1][3] ] self.decoders[field_idx].to(device) self.tails[field_idx].to(device) # embed_token_info on device[0] since it is shared by all fields, potentially sub-optimal self.embed_token_info.to(devices[0]) # TODO: only for backward compatibility, remove self.embeds_token_info.to(devices[0])
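The device-placement loop at the end of `AtmoRep.create()` above reads an optional per-field device id from `field_info[1][3]` and otherwise falls back to `devices[0]`. A small sketch of just that rule, with hypothetical field specs and device names:

```python
def device_for_field(field_info, devices):
    """Pick the device a field's modules should be moved to (default: devices[0])."""
    device = devices[0]
    if len(field_info[1]) > 3:
        # single-node model parallelism: the id indexes into the local device list
        assert field_info[1][3] < len(devices), "per-field device id larger than max devices"
        device = devices[field_info[1][3]]
    return device

devices = ["cuda:0", "cuda:1"]                  # hypothetical two-GPU node
field_pinned  = ["temperature", [0, 0, [], 1]]  # explicitly placed on devices[1]
field_default = ["velocity_u",  [0, 0, []]]     # no device entry -> devices[0]

print(device_for_field(field_pinned, devices))   # cuda:1
print(device_for_field(field_default, devices))  # cuda:0
```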
self.checkpoint = identity
0
2023-10-09 19:42:46+00:00
16k
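The first record above ends with its gold continuation (`self.checkpoint = identity`), a snippet index, a timestamp, and a `16k` context-length bucket. As a hedged illustration of how such a row could be consumed in a next-line code-completion evaluation (the field names below are inferred from this dump, not an official schema):

```python
def exact_match_next_line(record: dict, model_output: str) -> bool:
    """Score a completion by comparing its first line against the gold next_line."""
    first_line = model_output.splitlines()[0].strip() if model_output else ""
    return first_line == record["next_line"].strip()

record = {
    "next_line": "self.checkpoint = identity",
    "gold_snippet_index": 0,
    "level": "16k",
}
print(exact_match_next_line(record, "self.checkpoint = identity\nself.foo = 1"))  # True
```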
NKI-AI/ahcore
ahcore/callbacks/wsi_metric_callback.py
[ { "identifier": "WriteH5Callback", "path": "ahcore/callbacks/h5_callback.py", "snippet": "class WriteH5Callback(Callback):\n def __init__(\n self,\n max_queue_size: int,\n max_concurrent_writers: int,\n dump_dir: Path,\n normalization_type: str = str(NormalizationType.LOGITS),\n precision: str = str(InferencePrecision.FP32),\n ):\n \"\"\"\n Callback to write predictions to H5 files. This callback is used to write whole-slide predictions to single H5\n files in a separate thread.\n\n TODO:\n - Add support for distributed data parallel\n\n Parameters\n ----------\n max_queue_size : int\n The maximum number of items to store in the queue (i.e. tiles).\n max_concurrent_writers : int\n The maximum number of concurrent writers.\n dump_dir : pathlib.Path\n The directory to dump the H5 files to.\n normalization_type : str\n The normalization type to use for the predictions. One of \"sigmoid\", \"softmax\" or \"logits\".\n precision : str\n The precision to use for the predictions. One of \"float16\", \"float32\" or \"uint8\".\n \"\"\"\n super().__init__()\n self._writers: dict[str, _WriterMessage] = {}\n self._current_filename = None\n self._dump_dir = Path(dump_dir)\n self._max_queue_size = max_queue_size\n self._semaphore = Semaphore(max_concurrent_writers)\n self._dataset_index = 0\n self._normalization_type: NormalizationType = NormalizationType(normalization_type)\n self._precision: InferencePrecision = InferencePrecision(precision)\n\n self._logger = get_logger(type(self).__name__)\n\n @property\n def dump_dir(self) -> Path:\n return self._dump_dir\n\n def __process_management(self) -> None:\n \"\"\"\n Handle the graceful termination of multiple processes at the end of h5 writing.\n This block ensures proper release of resources allocated during multiprocessing.\n\n Returns\n -------\n None\n \"\"\"\n assert self._current_filename, \"_current_filename shouldn't be None here\"\n\n self._writers[self._current_filename][\"queue\"].put(None)\n self._writers[self._current_filename][\"process\"].join()\n self._writers[self._current_filename][\"process\"].close()\n self._writers[self._current_filename][\"queue\"].close()\n\n @property\n def writers(self) -> dict[str, _WriterMessage]:\n return self._writers\n\n def _batch_end(\n self,\n trainer: pl.Trainer,\n pl_module: pl.LightningModule,\n outputs: Any,\n batch: Any,\n batch_idx: int,\n stage: str,\n dataloader_idx: int = 0,\n ) -> None:\n filename = batch[\"path\"][0] # Filenames are constant across the batch.\n if any([filename != path for path in batch[\"path\"]]):\n raise ValueError(\n \"All paths in a batch must be the same. 
\"\n \"Either use batch_size=1 or ahcore.data.samplers.WsiBatchSampler.\"\n )\n\n if filename != self._current_filename:\n output_filename = _get_h5_output_filename(\n self.dump_dir,\n filename,\n model_name=str(pl_module.name),\n step=pl_module.global_step,\n )\n output_filename.parent.mkdir(parents=True, exist_ok=True)\n link_fn = (\n self.dump_dir / \"outputs\" / f\"{pl_module.name}\" / f\"step_{pl_module.global_step}\" / \"image_h5_link.txt\"\n )\n with open(link_fn, \"a\" if link_fn.is_file() else \"w\") as file:\n file.write(f\"{filename},{output_filename}\\n\")\n\n self._logger.debug(\"%s -> %s\", filename, output_filename)\n if self._current_filename is not None:\n self.__process_management()\n self._semaphore.release()\n\n self._semaphore.acquire()\n\n if stage == \"validate\":\n total_dataset: ConcatDataset = trainer.datamodule.validate_dataset # type: ignore\n elif stage == \"predict\":\n total_dataset: ConcatDataset = trainer.predict_dataloaders.dataset # type: ignore\n else:\n raise NotImplementedError(f\"Stage {stage} is not supported for {self.__class__.__name__}.\")\n\n current_dataset: TiledWsiDataset\n current_dataset, _ = total_dataset.index_to_dataset(self._dataset_index) # type: ignore\n slide_image = current_dataset.slide_image\n\n data_description: DataDescription = pl_module.data_description # type: ignore\n inference_grid: GridDescription = data_description.inference_grid\n\n mpp = inference_grid.mpp\n if mpp is None:\n mpp = slide_image.mpp\n\n _, size = slide_image.get_scaled_slide_bounds(slide_image.get_scaling(mpp))\n num_samples = len(current_dataset)\n\n # Let's get the data_description, so we can figure out the tile size and things like that\n tile_size = inference_grid.tile_size\n tile_overlap = inference_grid.tile_overlap\n\n # TODO: We are really putting strange things in the Queue if we may believe mypy\n new_queue: Queue[Any] = Queue() # pylint: disable=unsubscriptable-object\n parent_conn, child_conn = Pipe()\n new_writer = H5FileImageWriter(\n output_filename,\n size=size,\n mpp=mpp,\n tile_size=tile_size,\n tile_overlap=tile_overlap,\n num_samples=num_samples,\n color_profile=None,\n is_compressed_image=False,\n progress=None,\n precision=InferencePrecision(self._precision),\n )\n new_process = Process(target=new_writer.consume, args=(self.generator(new_queue), child_conn))\n new_process.start()\n self._writers[filename] = {\n \"queue\": new_queue,\n \"writer\": new_writer,\n \"process\": new_process,\n \"connection\": parent_conn,\n }\n self._current_filename = filename\n\n prediction = outputs[\"prediction\"]\n prediction = NormalizationType.normalize(self._normalization_type)(prediction).detach().cpu().numpy()\n coordinates_x, coordinates_y = batch[\"coordinates\"]\n coordinates = torch.stack([coordinates_x, coordinates_y]).T.detach().cpu().numpy()\n self._writers[filename][\"queue\"].put((coordinates, prediction))\n self._dataset_index += prediction.shape[0]\n\n def _epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:\n if self._current_filename is not None:\n self.__process_management()\n self._semaphore.release()\n self._dataset_index = 0\n # Reset current filename to None for correct execution of subsequent validation loop\n self._current_filename = None\n # Clear all the writers from the current epoch\n self._writers = {}\n\n def on_validation_batch_end(\n self,\n trainer: pl.Trainer,\n pl_module: pl.LightningModule,\n outputs: Any,\n batch: Any,\n batch_idx: int,\n dataloader_idx: int = 0,\n ) -> None:\n 
self._batch_end(trainer, pl_module, outputs, batch, batch_idx, \"validate\", dataloader_idx)\n\n def on_predict_batch_end(\n self,\n trainer: pl.Trainer,\n pl_module: pl.LightningModule,\n outputs: Any,\n batch: Any,\n batch_idx: int,\n dataloader_idx: int = 0,\n ) -> None:\n self._batch_end(trainer, pl_module, outputs, batch, batch_idx, \"predict\", dataloader_idx)\n\n def on_validation_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:\n self._epoch_end(trainer, pl_module)\n\n def on_predict_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:\n self._epoch_end(trainer, pl_module)\n\n @staticmethod\n def generator(\n queue: Queue[Optional[GenericArray]], # pylint: disable=unsubscriptable-object\n ) -> Generator[GenericArray, None, None]:\n while True:\n batch = queue.get()\n if batch is None:\n break\n yield batch" }, { "identifier": "AhCoreLightningModule", "path": "ahcore/lit_module.py", "snippet": "class AhCoreLightningModule(pl.LightningModule):\n RELEVANT_KEYS = [\n \"coordinates\",\n \"mpp\",\n \"path\",\n \"region_index\",\n \"grid_local_coordinates\",\n \"grid_index\",\n ]\n\n def __init__(\n self,\n model: nn.Module,\n optimizer: torch.optim.Optimizer, # noqa\n data_description: DataDescription,\n loss: nn.Module | None = None,\n augmentations: dict[str, nn.Module] | None = None,\n metrics: dict[str, MetricFactory | WSIMetricFactory] | None = None,\n scheduler: torch.optim.lr_scheduler.LRScheduler | None = None, # noqa\n ):\n super().__init__()\n\n self.save_hyperparameters(\n logger=False,\n ignore=[\n \"model\",\n \"augmentations\",\n \"metrics\",\n \"data_description\",\n \"loss\",\n ],\n ) # TODO: we should send the hyperparams to the logger elsewhere\n\n self._num_classes = data_description.num_classes\n self._model = model(out_channels=self._num_classes)\n self._augmentations = augmentations\n\n self._loss = loss\n if metrics is not None:\n tile_metric = metrics.get(\"tile_level\")\n wsi_metric = metrics.get(\"wsi_level\", None)\n if tile_metric is not None and not isinstance(tile_metric, MetricFactory):\n raise ConfigurationError(\"Tile metrics must be of type MetricFactory\")\n if wsi_metric is not None and not isinstance(wsi_metric, WSIMetricFactory):\n raise ConfigurationError(\"WSI metrics must be of type WSIMetricFactory\")\n\n self._tile_metric = tile_metric\n self._wsi_metrics = wsi_metric\n\n self._data_description = data_description\n\n @property\n def wsi_metrics(self) -> WSIMetricFactory | None:\n return self._wsi_metrics\n\n @property\n def name(self) -> str:\n return str(self._model.__class__.__name__)\n\n def forward(self, sample: torch.Tensor) -> Any:\n \"\"\"This function is only used during inference\"\"\"\n self._model.eval()\n return self._model.forward(sample)\n\n @property\n def data_description(self) -> DataDescription:\n return self._data_description\n\n def _compute_metrics(\n self,\n prediction: torch.Tensor,\n target: torch.Tensor,\n roi: torch.Tensor | None,\n stage: TrainerFn | str,\n ) -> dict[str, torch.Tensor]:\n if not self._tile_metric:\n return {}\n\n _stage = stage.value if isinstance(stage, TrainerFn) else stage\n metrics = {f\"{_stage}/{k}\": v for k, v in self._tile_metric(prediction, target, roi).items()}\n return metrics\n\n def do_step(self, batch: DlupDatasetSample, batch_idx: int, stage: TrainerFn | str) -> LitModuleSample:\n if self._augmentations and stage in self._augmentations:\n batch = self._augmentations[stage](batch)\n\n if self._loss is None:\n raise RuntimeError(\n 
f\"Loss is not defined for {self.__class__.__name__}. \"\n f\"This is required during training and validation\"\n )\n\n _target = batch[\"target\"]\n # Batch size is required for accurate loss calculation and logging\n batch_size = batch[\"image\"].shape[0]\n # ROIs can reduce the usable area of the inputs, the loss should be scaled appropriately\n roi = batch.get(\"roi\", None)\n\n if stage == \"fit\":\n _prediction = self._model(batch[\"image\"])\n batch[\"prediction\"] = _prediction\n else:\n batch = {**batch, **self._get_inference_prediction(batch[\"image\"])}\n _prediction = batch[\"prediction\"]\n\n loss = self._loss(_prediction, _target, roi)\n\n # The relevant_dict contains values to know where the tiles originate.\n _relevant_dict = {k: v for k, v in batch.items() if k in self.RELEVANT_KEYS}\n _metrics = self._compute_metrics(_prediction, _target, roi, stage=stage)\n _loss = loss.mean()\n # TODO: This can be a TypedDict\n output = {\n \"loss\": _loss,\n \"loss_per_sample\": loss.clone().detach(),\n \"metrics\": _metrics,\n **_relevant_dict,\n }\n if stage != \"fit\":\n output[\"prediction\"] = _prediction\n\n _stage = stage.value if isinstance(stage, TrainerFn) else stage\n\n self.log(\n f\"{_stage}/loss\",\n _loss,\n batch_size=batch_size,\n sync_dist=True,\n on_epoch=True,\n prog_bar=True,\n )\n\n # Log the metrics\n self.log_dict(\n _metrics,\n batch_size=batch_size,\n sync_dist=True,\n prog_bar=False,\n on_epoch=True,\n on_step=False,\n )\n\n return output\n\n def _get_inference_prediction(self, _input: torch.Tensor) -> dict[str, torch.Tensor]:\n output = {}\n output[\"prediction\"] = self._model(_input)\n return output\n\n def training_step(self, batch: dict[str, Any], batch_idx: int) -> dict[str, Any]:\n output = self.do_step(batch, batch_idx, stage=\"fit\")\n return output\n\n def validation_step(self, batch: dict[str, Any], batch_idx: int) -> dict[str, Any]:\n output = self.do_step(batch, batch_idx, stage=\"validate\")\n\n # This is a sanity check. We expect the filenames to be constant across the batch.\n filename = batch[\"path\"][0]\n if any([filename != f for f in batch[\"path\"]]):\n raise ValueError(\"Filenames are not constant across the batch.\")\n return output\n\n def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> Any:\n if self._augmentations and \"predict\" in self._augmentations:\n batch = self._augmentations[\"predict\"](batch)\n\n _relevant_dict = {k: v for k, v in batch.items() if k in self.RELEVANT_KEYS}\n batch = {**batch, **self._get_inference_prediction(batch[\"image\"])}\n _prediction = batch[\"prediction\"]\n output = {\"prediction\": _prediction, **_relevant_dict}\n\n # This is a sanity check. 
We expect the filenames to be constant across the batch.\n filename = batch[\"path\"][0]\n if any([filename != f for f in batch[\"path\"]]):\n raise ValueError(\"Filenames are not constant across the batch.\")\n return output\n\n def configure_optimizers(self) -> Any:\n optimizer = self.hparams.optimizer(params=self.parameters()) # type: ignore\n if self.hparams.scheduler is not None: # type: ignore\n scheduler = self.hparams.scheduler(optimizer=optimizer) # type: ignore\n return {\n \"optimizer\": optimizer,\n \"lr_scheduler\": {\n \"scheduler\": scheduler,\n \"monitor\": \"validate/loss\",\n \"interval\": \"epoch\",\n \"frequency\": self.trainer.check_val_every_n_epoch,\n },\n }\n return {\"optimizer\": optimizer}" }, { "identifier": "WSIMetricFactory", "path": "ahcore/metrics/metrics.py", "snippet": "class WSIMetricFactory:\n # TODO: this should be rewritten to actually be a factory\n def __init__(self, metrics: list[WSIMetric]) -> None:\n super().__init__()\n names = [metric.name for metric in metrics]\n if len(set(names)) != len(names):\n raise RuntimeError(\"Each individual metric must have a different name.\")\n\n self._metrics = metrics\n\n @classmethod\n def for_segmentation(cls, *args: Any, **kwargs: Any) -> WSIMetricFactory:\n dices = WSIDiceMetric(*args, **kwargs)\n return cls([dices])\n\n @classmethod\n def for_wsi_classification(cls, *args: Any, **kwargs: Any) -> WSIMetricFactory:\n raise NotImplementedError\n\n @classmethod\n def for_tile_classification(cls, roi_name: str, label: str, threshold: float) -> WSIMetricFactory:\n raise NotImplementedError\n\n def process_batch(\n self,\n predictions: torch.Tensor,\n target: torch.Tensor,\n wsi_name: str,\n roi: torch.Tensor | None,\n ) -> None:\n for metric in self._metrics:\n metric.process_batch(predictions, target, wsi_name=wsi_name, roi=roi)\n\n def get_average_score(\n self, precomputed_output: list[list[dict[str, dict[str, float]]]] | None = None\n ) -> dict[str, float]:\n output = {}\n for metric in self._metrics:\n output.update(metric.get_average_score(precomputed_output))\n return output\n\n def reset(self) -> None:\n for metric in self._metrics:\n metric.reset()\n\n def __repr__(self) -> str:\n return f\"{type(self).__name__}(metrics={self._metrics})\"" }, { "identifier": "H5FileImageReader", "path": "ahcore/readers.py", "snippet": "class H5FileImageReader:\n def __init__(self, filename: Path, stitching_mode: StitchingMode) -> None:\n self._filename = filename\n self._stitching_mode = stitching_mode\n\n self.__empty_tile: GenericArray | None = None\n\n self._h5file: Optional[h5py.File] = None\n self._metadata = None\n self._mpp = None\n self._tile_size = None\n self._tile_overlap = None\n self._size = None\n self._num_channels = None\n self._dtype = None\n self._stride = None\n\n @classmethod\n def from_file_path(cls, filename: Path, stitching_mode: StitchingMode = StitchingMode.CROP) -> \"H5FileImageReader\":\n return cls(filename=filename, stitching_mode=stitching_mode)\n\n @property\n def size(self) -> tuple[int, int]:\n if not self._size:\n self._open_file()\n assert self._size\n return self._size\n\n @property\n def mpp(self) -> float:\n if not self._mpp:\n self._open_file()\n assert self._mpp\n return self._mpp\n\n def get_mpp(self, scaling: Optional[float]) -> float:\n if not self._mpp:\n self._open_file()\n assert self._mpp\n if scaling is None:\n return self.mpp\n\n return self._mpp / scaling\n\n def get_scaling(self, mpp: Optional[float]) -> float:\n \"\"\"Inverse of get_mpp().\"\"\"\n if not self._mpp:\n 
self._open_file()\n assert self._mpp\n if not mpp:\n return 1.0\n return self._mpp / mpp\n\n def _open_file(self) -> None:\n if not self._filename.is_file():\n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), str(self._filename))\n\n try:\n self._h5file = h5py.File(self._filename, \"r\")\n except OSError as e:\n logger.error(f\"Could not open file {self._filename}: {e}\")\n raise e\n\n try:\n self._metadata = json.loads(self._h5file.attrs[\"metadata\"])\n except KeyError as e:\n logger.error(f\"Could not read metadata from file {self._filename}: {e}\")\n raise e\n\n if not self._metadata:\n raise ValueError(\"Metadata of h5 file is empty.\")\n\n self._mpp = self._metadata[\"mpp\"]\n self._tile_size = self._metadata[\"tile_size\"]\n self._tile_overlap = self._metadata[\"tile_overlap\"]\n self._size = self._metadata[\"size\"]\n self._num_channels = self._metadata[\"num_channels\"]\n self._dtype = self._metadata[\"dtype\"]\n self._precision = self._metadata[\"precision\"]\n self._multiplier = self._metadata[\"multiplier\"]\n self._stride = (\n self._tile_size[0] - self._tile_overlap[0],\n self._tile_size[1] - self._tile_overlap[1],\n )\n\n if self._metadata[\"has_color_profile\"]:\n _color_profile = self._h5file[\"color_profile\"][()].tobytes()\n raise NotImplementedError(f\"Color profiles are not yet implemented, and are present in {self._filename}.\")\n\n def __enter__(self) -> \"H5FileImageReader\":\n if self._h5file is None:\n self._open_file()\n return self\n\n def _empty_tile(self) -> GenericArray:\n if self.__empty_tile is not None:\n return self.__empty_tile\n\n # When this happens we would already be in the read_region, and self._num_channels would be populated.\n assert self._num_channels\n\n self.__empty_tile = np.zeros((self._num_channels, *self._tile_size), dtype=self._dtype)\n return self.__empty_tile\n\n def read_region(\n self,\n location: tuple[int, int],\n scaling: float,\n size: tuple[int, int],\n ) -> GenericArray:\n \"\"\"\n\n Parameters\n ----------\n location : tuple[int, int]\n Location from the top left (x, y) in pixel coordinates given at the requested scaling.\n scaling : float\n size : tuple[int, int]\n Size of the output region\n\n Returns\n -------\n np.ndarray\n The requested region.\n \"\"\"\n if scaling == 1.0:\n return self.read_region_raw(location, size)\n\n order = 1\n # Calculate original location and size considering the scaling\n\n # unpack for mypy\n l1, l2 = location\n s1, s2 = size\n\n original_location = (\n int(math.floor(l1 / scaling)) - order,\n int(math.floor(l2 / scaling)) - order,\n )\n original_size = (\n int(math.ceil(s1 / scaling)) + order,\n int(math.ceil(s2 / scaling)) + order,\n )\n\n raw_region = self.read_region_raw(original_location, original_size)\n\n # Determine the fractional start and end coordinates for mapping\n fractional_start = tuple(map(lambda _, ol: (_ / scaling) - ol + order, location, original_location))\n fractional_end = tuple(fs + size[i] / scaling for i, fs in enumerate(fractional_start))\n\n # Create an array of coordinates for map_coordinates\n # mypy doesn't properly understand yet that the complex type is valid\n coordinates = np.mgrid[\n fractional_start[0] : fractional_end[0] : complex(size[0]), # type: ignore\n fractional_start[1] : fractional_end[1] : complex(size[1]), # type: ignore\n ]\n coordinates = np.moveaxis(coordinates, 0, -1)\n\n # Interpolate using map_coordinates for all channels\n grid = np.mgrid[: raw_region.shape[0]]\n coordinates = np.concatenate([grid[:, None, None], 
coordinates], axis=0)\n # scipy doesn't have proper typing yet\n rescaled_region = cast(GenericArray, map_coordinates(raw_region, coordinates, order=order))\n\n return rescaled_region\n\n def read_region_raw(self, location: tuple[int, int], size: tuple[int, int]) -> GenericArray:\n \"\"\"\n Reads a region in the stored h5 file. This function stitches the regions as saved in the h5 file. Doing this\n it takes into account:\n 1) The region overlap, several region merging strategies are implemented: cropping, averaging across borders\n and taking the maximum across borders.\n 2) If tiles are saved or not. In case the tiles are skipped due to a background mask, an empty tile is returned.\n\n Parameters\n ----------\n location : tuple[int, int]\n Coordinates (x, y) of the upper left corner of the region.\n size : tuple[int, int]\n The (h, w) size of the extracted region.\n\n Returns\n -------\n np.ndarray\n Extracted region\n \"\"\"\n if self._h5file is None:\n self._open_file()\n assert self._h5file, \"File is not open. Should not happen\"\n assert self._tile_size\n assert self._tile_overlap\n\n image_dataset = self._h5file[\"data\"]\n num_tiles = self._metadata[\"num_tiles\"]\n tile_indices = self._h5file[\"tile_indices\"]\n\n total_rows = math.ceil((self._size[1] - self._tile_overlap[1]) / self._stride[1])\n total_cols = math.ceil((self._size[0] - self._tile_overlap[0]) / self._stride[0])\n\n assert total_rows * total_cols == num_tiles\n\n x, y = location\n w, h = size\n if x < 0 or y < 0 or x + w > self._size[0] or y + h > self._size[1]:\n logger.error(f\"Requested region is out of bounds: {location}, {self._size}\")\n raise ValueError(\"Requested region is out of bounds\")\n\n start_row = y // self._stride[1]\n end_row = min((y + h - 1) // self._stride[1] + 1, total_rows)\n start_col = x // self._stride[0]\n end_col = min((x + w - 1) // self._stride[0] + 1, total_cols)\n\n if self._stitching_mode == StitchingMode.AVERAGE:\n divisor_array = np.zeros((h, w), dtype=np.uint8)\n stitched_image = np.zeros((self._num_channels, h, w), dtype=self._dtype)\n for i in range(start_row, end_row):\n for j in range(start_col, end_col):\n tile_idx = (i * total_cols) + j\n # Map through tile indices\n tile_index_in_image_dataset = tile_indices[tile_idx]\n tile = (\n self._empty_tile()\n if tile_index_in_image_dataset == -1\n else image_dataset[tile_index_in_image_dataset]\n )\n start_y = i * self._stride[1] - y\n end_y = start_y + self._tile_size[1]\n start_x = j * self._stride[0] - x\n end_x = start_x + self._tile_size[0]\n\n img_start_y = max(0, start_y)\n img_end_y = min(h, end_y)\n img_start_x = max(0, start_x)\n img_end_x = min(w, end_x)\n\n if self._stitching_mode == StitchingMode.CROP:\n crop_start_y = img_start_y - start_y\n crop_end_y = img_end_y - start_y\n crop_start_x = img_start_x - start_x\n crop_end_x = img_end_x - start_x\n\n bbox = (crop_start_x, crop_start_y), (\n crop_end_x - crop_start_x,\n crop_end_y - crop_start_y,\n )\n cropped_tile = crop_to_bbox(tile, bbox)\n stitched_image[:, img_start_y:img_end_y, img_start_x:img_end_x] = cropped_tile\n\n elif self._stitching_mode == StitchingMode.AVERAGE:\n raise NotImplementedError\n tile_start_y = max(0, -start_y)\n tile_end_y = img_end_y - img_start_y\n tile_start_x = max(0, -start_x)\n tile_end_x = img_end_x - img_start_x\n\n # TODO: Replace this with crop_to_bbox\n cropped_tile = tile[tile_start_y:tile_end_y, tile_start_x:tile_end_x]\n stitched_image[img_start_y:img_end_y, img_start_x:img_end_x] += cropped_tile\n 
divisor_array[img_start_y:img_end_y, img_start_x:img_end_x] += 1\n else:\n raise ValueError(\"Unsupported stitching mode\")\n\n if self._stitching_mode == StitchingMode.AVERAGE:\n stitched_image = (stitched_image / divisor_array[..., np.newaxis]).astype(float)\n\n if self._precision != str(InferencePrecision.FP32):\n # Always convert to float32.\n stitched_image = stitched_image / self._multiplier\n stitched_image = stitched_image.astype(np.float32)\n\n return stitched_image\n\n def close(self) -> None:\n if self._h5file is not None:\n self._h5file.close() # Close the file in close\n del self._h5file # Reset the h5file attribute\n\n def __exit__(\n self,\n exc_type: Optional[Type[BaseException]],\n exc_val: Optional[BaseException],\n exc_tb: Optional[TracebackType],\n ) -> Literal[False]:\n self.close()\n return False" }, { "identifier": "StitchingMode", "path": "ahcore/readers.py", "snippet": "class StitchingMode(str, Enum):\n CROP = \"crop\"\n AVERAGE = \"average\"\n MAXIMUM = \"maximum\"" }, { "identifier": "_get_h5_output_filename", "path": "ahcore/utils/callbacks.py", "snippet": "def _get_h5_output_filename(dump_dir: Path, input_path: Path, model_name: str, step: None | int | str = None) -> Path:\n hex_dig = _get_uuid_for_filename(input_path=input_path)\n\n # Return the hashed filename with the new extension\n if step is not None:\n return dump_dir / \"outputs\" / model_name / f\"step_{step}\" / f\"{hex_dig}.h5\"\n return dump_dir / \"outputs\" / model_name / f\"{hex_dig}.h5\"" }, { "identifier": "_ValidationDataset", "path": "ahcore/utils/callbacks.py", "snippet": "class _ValidationDataset(Dataset[DlupDatasetSample]):\n \"\"\"Helper dataset to compute the validation metrics.\"\"\"\n\n def __init__(\n self,\n data_description: Optional[DataDescription],\n native_mpp: float,\n reader: H5FileImageReader,\n annotations: Optional[WsiAnnotations] = None,\n mask: Optional[WsiAnnotations] = None,\n region_size: tuple[int, int] = (1024, 1024),\n ):\n \"\"\"\n Parameters\n ----------\n data_description : DataDescription\n native_mpp : float\n The actual mpp of the underlying image.\n reader : H5FileImageReader\n annotations : WsiAnnotations\n mask : WsiAnnotations\n region_size : Tuple[int, int]\n The region size to use to split up the image into regions.\n \"\"\"\n super().__init__()\n self._data_description = data_description\n self._native_mpp = native_mpp\n self._scaling = self._native_mpp / reader.mpp\n self._reader = reader\n self._region_size = region_size\n self._logger = get_logger(type(self).__name__)\n\n self._annotations = self._validate_annotations(annotations)\n self._mask = self._validate_annotations(mask)\n\n self._grid = Grid.from_tiling(\n (0, 0),\n reader.size,\n tile_size=self._region_size,\n tile_overlap=(0, 0),\n mode=TilingMode.overflow,\n order=GridOrder.C,\n )\n\n self._regions = self._generate_regions()\n self._logger.debug(f\"Number of validation regions: {len(self._regions)}\")\n\n def _validate_annotations(self, annotations: Optional[WsiAnnotations]) -> Optional[WsiAnnotations]:\n if annotations is None:\n return None\n\n if isinstance(annotations, WsiAnnotations):\n if self._data_description is None:\n raise ValueError(\n \"Annotations as a `WsiAnnotations` class are provided but no data description is given.\"\n \"This is required to map the labels to indices.\"\n )\n elif isinstance(annotations, SlideImage):\n pass # We do not need a specific test for this\n else:\n raise NotImplementedError(f\"Annotations of type {type(annotations)} are not supported.\")\n\n 
return annotations\n\n def _generate_regions(self) -> list[tuple[int, int]]:\n \"\"\"Generate the regions to use. These regions are filtered grid cells where there is a mask.\n\n Returns\n -------\n List[Tuple[int, int]]\n The list of regions.\n \"\"\"\n regions = []\n for coordinates in self._grid:\n _coordinates = (coordinates[0], coordinates[1])\n if self._mask is None or self._is_masked(_coordinates):\n regions.append(_coordinates)\n return regions\n\n def _is_masked(self, coordinates: tuple[int, int]) -> bool:\n \"\"\"Check if the region is masked. This works with any masking function that supports a `read_region` method or\n returns a list of annotations with an `area` attribute. In case there are elements of the form `Point` in the\n annotation list, these are also added.\n\n Parameters\n ----------\n coordinates : Tuple[int, int]\n The coordinates of the region to check.\n\n Returns\n -------\n bool\n True if the region is masked, False otherwise. Will also return True when there is no mask.\n \"\"\"\n if self._mask is None:\n return True\n\n region_mask = self._mask.read_region(coordinates, self._scaling, self._region_size)\n\n if isinstance(region_mask, np.ndarray):\n return region_mask.sum() > 0\n\n # We check if the region is not a Point, otherwise this annotation is always included\n # Else, we compute if there is a positive area in the region.\n return bool(sum(_.area if _ is not isinstance(_, (Point, MultiPoint)) else 1.0 for _ in region_mask) > 0)\n\n def __getitem__(self, idx: int) -> dict[str, Any]:\n sample = {}\n coordinates = self._regions[idx]\n\n sample[\"prediction\"] = self._get_h5_region(coordinates)\n\n if self._annotations is not None:\n target, roi = self._get_annotation_data(coordinates)\n if roi is not None:\n sample[\"roi\"] = roi.astype(np.uint8)\n else:\n sample[\"roi\"] = None # type: ignore\n sample[\"target\"] = target\n\n return sample\n\n def _get_h5_region(self, coordinates: tuple[int, int]) -> npt.NDArray[np.uint8 | np.uint16 | np.float32 | np.bool_]:\n x, y = coordinates\n width, height = self._region_size\n\n if x + width > self._reader.size[0] or y + height > self._reader.size[1]:\n region = self._read_and_pad_region(coordinates)\n else:\n region = self._reader.read_region_raw(coordinates, self._region_size)\n return region\n\n def _read_and_pad_region(self, coordinates: tuple[int, int]) -> npt.NDArray[Any]:\n x, y = coordinates\n width, height = self._region_size\n new_width = min(width, self._reader.size[0] - x)\n new_height = min(height, self._reader.size[1] - y)\n clipped_region = self._reader.read_region_raw((x, y), (new_width, new_height))\n\n prediction = np.zeros((clipped_region.shape[0], *self._region_size), dtype=clipped_region.dtype)\n prediction[:, :new_height, :new_width] = clipped_region\n return prediction\n\n def _get_annotation_data(\n self, coordinates: tuple[int, int]\n ) -> tuple[npt.NDArray[np.float32], npt.NDArray[np.int_] | None]:\n if not self._annotations:\n raise ValueError(\"No annotations are provided.\")\n\n if not self._data_description:\n raise ValueError(\"No data description is provided.\")\n\n if not self._data_description.index_map:\n raise ValueError(\"Index map is not provided.\")\n\n _annotations = self._annotations.read_region(coordinates, self._scaling, self._region_size)\n\n if self._data_description.remap_labels:\n _annotations = rename_labels(_annotations, remap_labels=self._data_description.remap_labels)\n\n points, boxes, region, roi = convert_annotations(\n _annotations,\n self._region_size,\n 
index_map=self._data_description.index_map,\n roi_name=self._data_description.roi_name,\n )\n encoded_region = one_hot_encoding(index_map=self._data_description.index_map, mask=region)\n if roi is not None:\n return encoded_region, roi[np.newaxis, ...]\n return encoded_region, None\n\n def __iter__(self) -> Iterator[dict[str, Any]]:\n for idx in range(len(self)):\n yield self[idx]\n\n def __len__(self) -> int:\n return len(self._regions)" }, { "identifier": "DataDescription", "path": "ahcore/utils/data.py", "snippet": "class DataDescription(BaseModel):\n mask_label: Optional[str] = None\n mask_threshold: Optional[float] = None # This is only used for training\n roi_name: Optional[str] = None\n num_classes: PositiveInt\n data_dir: Path\n manifest_database_uri: str\n manifest_name: str\n split_version: str\n annotations_dir: Path\n training_grid: GridDescription\n inference_grid: GridDescription\n index_map: Optional[Dict[str, int]]\n remap_labels: Optional[Dict[str, str]] = None\n use_class_weights: Optional[bool] = False\n convert_mask_to_rois: bool = True\n use_roi: bool = True\n apply_color_profile: bool = True" }, { "identifier": "get_logger", "path": "ahcore/utils/io.py", "snippet": "def get_logger(name: str = __name__) -> logging.Logger:\n \"\"\"Initializes multi-GPU-friendly python command line logger.\"\"\"\n\n logger = logging.getLogger(name)\n\n # this ensures all logging levels get marked with the rank zero decorator\n # otherwise logs would get multiplied for each GPU process in multi-GPU setup\n for level in (\n \"debug\",\n \"info\",\n \"warning\",\n \"error\",\n \"exception\",\n \"fatal\",\n \"critical\",\n ):\n setattr(logger, level, rank_zero_only(getattr(logger, level)))\n\n return logger" }, { "identifier": "DataManager", "path": "ahcore/utils/manifest.py", "snippet": "class DataManager:\n def __init__(self, database_uri: str) -> None:\n self._database_uri = database_uri\n self.__session: Optional[Session] = None\n self._logger = get_logger(type(self).__name__)\n\n @property\n def _session(self) -> Session:\n if self.__session is None:\n self.__session = open_db(self._database_uri)\n return self.__session\n\n @staticmethod\n def _ensure_record(record: Any, description: str) -> None:\n \"\"\"Raises an error if the record is None.\"\"\"\n if not record:\n raise RecordNotFoundError(f\"{description} not found.\")\n\n def get_records_by_split(\n self,\n manifest_name: str,\n split_version: str,\n split_category: Optional[str] = None,\n ) -> Generator[Patient, None, None]:\n manifest = self._session.query(Manifest).filter_by(name=manifest_name).first()\n self._ensure_record(manifest, f\"Manifest with name {manifest_name}\")\n\n split_definition = self._session.query(SplitDefinitions).filter_by(version=split_version).first()\n self._ensure_record(split_definition, f\"Split definition with version {split_version}\")\n\n # This is because mypy is complaining otherwise,\n # but _ensure_record effectively ensures that the record is not None\n assert manifest is not None\n assert split_definition is not None\n query = (\n self._session.query(Patient)\n .join(Split)\n .filter(\n Patient.manifest_id == manifest.id,\n Split.split_definition_id == split_definition.id,\n )\n )\n\n if split_category is not None:\n split_category_key = get_enum_key_from_value(split_category, CategoryEnum)\n query = query.filter(Split.category == split_category_key)\n\n patients = query.all()\n\n self._logger.info(\n f\"Found {len(patients)} patients for split {split_category if split_category else 'all 
categories'}\"\n )\n for patient in patients:\n yield patient\n\n def get_image_metadata_by_split(\n self,\n manifest_name: str,\n split_version: str,\n split_category: Optional[str] = None,\n ) -> Generator[ImageMetadata, None, None]:\n \"\"\"\n Yields the metadata of images for a given manifest name, split version, and optional split category.\n\n Parameters\n ----------\n manifest_name : str\n The name of the manifest.\n split_version : str\n The version of the split.\n split_category : Optional[str], default=None\n The category of the split (e.g., \"fit\", \"validate\", \"test\").\n\n Yields\n -------\n ImageMetadata\n The metadata of the image.\n \"\"\"\n for patient in self.get_records_by_split(manifest_name, split_version, split_category):\n for image in patient.images:\n yield fetch_image_metadata(image)\n\n def get_image_metadata_by_patient(self, patient_code: str) -> list[ImageMetadata]:\n \"\"\"\n Fetch the metadata for the images associated with a specific patient.\n\n Parameters\n ----------\n patient_code : str\n The unique code of the patient.\n\n Returns\n -------\n list[ImageData]\n A list of metadata for all images associated with the patient.\n \"\"\"\n patient = self._session.query(Patient).filter_by(patient_code=patient_code).first()\n self._ensure_record(patient, f\"Patient with code {patient_code} not found\")\n assert patient is not None # for mypy\n return [fetch_image_metadata(image) for image in patient.images]\n\n def get_image_by_filename(self, filename: str) -> Image:\n \"\"\"\n Fetch the metadata for an image based on its filename.\n\n Parameters\n ----------\n filename : str\n The filename of the image.\n\n Returns\n -------\n Image\n The image from the database.\n \"\"\"\n image = self._session.query(Image).filter_by(filename=filename).first()\n self._ensure_record(image, f\"Image with filename {filename} not found\")\n assert image\n return image\n\n def get_image_metadata_by_id(self, image_id: int) -> ImageMetadata:\n \"\"\"\n Fetch the metadata for an image based on its ID.\n\n Parameters\n ----------\n image_id : int\n The ID of the image.\n\n Returns\n -------\n ImageMetadata\n Metadata of the image.\n \"\"\"\n image = self._session.query(Image).filter_by(id=image_id).first()\n self._ensure_record(image, f\"No image found with ID {image_id}\")\n assert image is not None # mypy\n return fetch_image_metadata(image)\n\n def __enter__(self) -> \"DataManager\":\n return self\n\n def __exit__(\n self,\n exc_type: Optional[Type[BaseException]],\n exc_val: Optional[BaseException],\n exc_tb: Optional[TracebackType],\n ) -> Literal[False]:\n if self._session is not None:\n self.close()\n return False\n\n def close(self) -> None:\n if self.__session is not None:\n self.__session.close()\n self.__session = None" }, { "identifier": "ImageMetadata", "path": "ahcore/utils/manifest.py", "snippet": "class ImageMetadata(BaseModel):\n \"\"\"Model to hold image metadata\"\"\"\n\n class Config:\n frozen = True\n\n filename: Path\n height: PositiveInt\n width: PositiveInt\n mpp: PositiveFloat" }, { "identifier": "fetch_image_metadata", "path": "ahcore/utils/manifest.py", "snippet": "def fetch_image_metadata(image: Image) -> ImageMetadata:\n \"\"\"Extract metadata from an Image object.\"\"\"\n return ImageMetadata(\n filename=Path(image.filename),\n height=int(image.height),\n width=int(image.width),\n mpp=float(image.mpp),\n )" }, { "identifier": "get_mask_and_annotations_from_record", "path": "ahcore/utils/manifest.py", "snippet": "def 
get_mask_and_annotations_from_record(\n annotations_root: Path, record: Image\n) -> tuple[_AnnotationReturnTypes | None, _AnnotationReturnTypes | None]:\n \"\"\"\n Get the mask and annotations from a record of type Image.\n\n Parameters\n ----------\n annotations_root : Path\n The root directory of the annotations.\n record : Type[Image]\n The record containing the mask and annotations.\n\n Returns\n -------\n tuple[WsiAnnotations, WsiAnnotations]\n The mask and annotations.\n \"\"\"\n _masks = parse_annotations_from_record(annotations_root, record.masks)\n _annotations = parse_annotations_from_record(annotations_root, record.annotations)\n return _masks, _annotations" } ]
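The context list for the ahcore record closes above; among its snippets, `_ValidationDataset._read_and_pad_region` clips an out-of-bounds H5 region request and zero-pads it back to the requested size. A self-contained sketch of that clip-and-pad step, with a fake reader standing in for `H5FileImageReader` (channel-first arrays and the `(width, height)` region convention are taken from the snippet; the toy sizes are invented):

```python
import numpy as np

def read_and_pad_region(reader_size, region_size, coordinates, read_fn):
    """Clip the request to the reader bounds, then zero-pad up to region_size."""
    x, y = coordinates
    width, height = region_size
    new_width = min(width, reader_size[0] - x)
    new_height = min(height, reader_size[1] - y)
    clipped = read_fn((x, y), (new_width, new_height))          # (C, new_h, new_w)
    padded = np.zeros((clipped.shape[0], height, width), dtype=clipped.dtype)
    padded[:, :new_height, :new_width] = clipped
    return padded

# Fake reader: 3-channel slide of ones, 100 x 80 pixels (width, height).
fake_read = lambda loc, size: np.ones((3, size[1], size[0]), dtype=np.float32)
region = read_and_pad_region((100, 80), (64, 64), (90, 70), fake_read)
print(region.shape, region[:, :10, :10].min(), region[:, 10:, 10:].max())  # (3, 64, 64) 1.0 0.0
```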
import itertools import json import multiprocessing import time import pytorch_lightning as pl import torch from collections import namedtuple from multiprocessing.pool import Pool from pathlib import Path from typing import Any, Generator, Optional, cast from pytorch_lightning import Callback from ahcore.callbacks import WriteH5Callback from ahcore.lit_module import AhCoreLightningModule from ahcore.metrics import WSIMetricFactory from ahcore.readers import H5FileImageReader, StitchingMode from ahcore.utils.callbacks import _get_h5_output_filename, _ValidationDataset from ahcore.utils.data import DataDescription from ahcore.utils.io import get_logger from ahcore.utils.manifest import DataManager, ImageMetadata, fetch_image_metadata, get_mask_and_annotations_from_record
11,415
from __future__ import annotations logger = get_logger(__name__) class ComputeWsiMetricsCallback(Callback): def __init__(self, max_processes: int = 10, save_per_image: bool = True) -> None: """ Callback to compute metrics on whole-slide images. This callback is used to compute metrics on whole-slide images in separate processes. Parameters ---------- max_processes : int The maximum number of concurrent processes. """ self._data_description: Optional[DataDescription] = None self._reader = H5FileImageReader self._max_processes: int = max_processes self._dump_dir: Optional[Path] = None self._save_per_image = save_per_image self._filenames: dict[Path, Path] = {} self._wsi_metrics: WSIMetricFactory | None = None self._class_names: dict[int, str] = {} self._data_manager = None self._validate_filenames_gen = None self._model_name: str | None = None self._validate_metadata_gen: Generator[ImageMetadata, None, None] | None = None self._dump_list: list[dict[str, str]] = [] self._logger = get_logger(type(self).__name__) def setup( self, trainer: pl.Trainer, pl_module: pl.LightningModule, stage: Optional[str] = None, ) -> None:
from __future__ import annotations logger = get_logger(__name__) class ComputeWsiMetricsCallback(Callback): def __init__(self, max_processes: int = 10, save_per_image: bool = True) -> None: """ Callback to compute metrics on whole-slide images. This callback is used to compute metrics on whole-slide images in separate processes. Parameters ---------- max_processes : int The maximum number of concurrent processes. """ self._data_description: Optional[DataDescription] = None self._reader = H5FileImageReader self._max_processes: int = max_processes self._dump_dir: Optional[Path] = None self._save_per_image = save_per_image self._filenames: dict[Path, Path] = {} self._wsi_metrics: WSIMetricFactory | None = None self._class_names: dict[int, str] = {} self._data_manager = None self._validate_filenames_gen = None self._model_name: str | None = None self._validate_metadata_gen: Generator[ImageMetadata, None, None] | None = None self._dump_list: list[dict[str, str]] = [] self._logger = get_logger(type(self).__name__) def setup( self, trainer: pl.Trainer, pl_module: pl.LightningModule, stage: Optional[str] = None, ) -> None:
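The `ComputeWsiMetricsCallback` code above relies on the context's `_get_h5_output_filename` to map each slide to its prediction file under `dump_dir / "outputs" / model_name / step_{step}`. Here is a sketch of that layout; the snippet does not show `_get_uuid_for_filename`, so a sha256 digest of the input path stands in for it as an explicit assumption:

```python
import hashlib
from pathlib import Path

def h5_output_filename(dump_dir: Path, input_path: Path, model_name: str, step=None) -> Path:
    # Assumption: the real helper hashes the input path; sha256 is only a stand-in.
    hex_dig = hashlib.sha256(str(input_path).encode("utf-8")).hexdigest()
    if step is not None:
        return dump_dir / "outputs" / model_name / f"step_{step}" / f"{hex_dig}.h5"
    return dump_dir / "outputs" / model_name / f"{hex_dig}.h5"

print(h5_output_filename(Path("/tmp/dump"), Path("slide_001.svs"), "UNet", step=100))
```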
if not isinstance(pl_module, AhCoreLightningModule):
1
2023-10-14 18:04:12+00:00
16k
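Each record in this dump carries a token count (here 11,415) and a coarse context-length label (`16k`). The following bucketing rule is a guess for illustration only; the actual thresholds behind the `level` field are not stated anywhere in the dump:

```python
def length_bucket(token_num: int) -> str:
    """Map a token count to a context-length label (thresholds are assumptions)."""
    for limit, label in [(2_000, "2k"), (4_000, "4k"), (8_000, "8k"), (16_000, "16k")]:
        if token_num <= limit:
            return label
    return "32k+"

print(length_bucket(11_415))   # '16k'
```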
fury-05/BookRecomendApp
.pythonlibs/lib/python3.10/site-packages/sklearn/cluster/_dbscan.py
[ { "identifier": "BaseEstimator", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/base.py", "snippet": "class BaseEstimator(_MetadataRequester):\n \"\"\"Base class for all estimators in scikit-learn.\n\n Notes\n -----\n All estimators should specify all the parameters that can be set\n at the class level in their ``__init__`` as explicit keyword\n arguments (no ``*args`` or ``**kwargs``).\n \"\"\"\n\n @classmethod\n def _get_param_names(cls):\n \"\"\"Get parameter names for the estimator\"\"\"\n # fetch the constructor or the original constructor before\n # deprecation wrapping if any\n init = getattr(cls.__init__, \"deprecated_original\", cls.__init__)\n if init is object.__init__:\n # No explicit constructor to introspect\n return []\n\n # introspect the constructor arguments to find the model parameters\n # to represent\n init_signature = inspect.signature(init)\n # Consider the constructor parameters excluding 'self'\n parameters = [\n p\n for p in init_signature.parameters.values()\n if p.name != \"self\" and p.kind != p.VAR_KEYWORD\n ]\n for p in parameters:\n if p.kind == p.VAR_POSITIONAL:\n raise RuntimeError(\n \"scikit-learn estimators should always \"\n \"specify their parameters in the signature\"\n \" of their __init__ (no varargs).\"\n \" %s with constructor %s doesn't \"\n \" follow this convention.\" % (cls, init_signature)\n )\n # Extract and sort argument names excluding 'self'\n return sorted([p.name for p in parameters])\n\n def get_params(self, deep=True):\n \"\"\"\n Get parameters for this estimator.\n\n Parameters\n ----------\n deep : bool, default=True\n If True, will return the parameters for this estimator and\n contained subobjects that are estimators.\n\n Returns\n -------\n params : dict\n Parameter names mapped to their values.\n \"\"\"\n out = dict()\n for key in self._get_param_names():\n value = getattr(self, key)\n if deep and hasattr(value, \"get_params\") and not isinstance(value, type):\n deep_items = value.get_params().items()\n out.update((key + \"__\" + k, val) for k, val in deep_items)\n out[key] = value\n return out\n\n def set_params(self, **params):\n \"\"\"Set the parameters of this estimator.\n\n The method works on simple estimators as well as on nested objects\n (such as :class:`~sklearn.pipeline.Pipeline`). The latter have\n parameters of the form ``<component>__<parameter>`` so that it's\n possible to update each component of a nested object.\n\n Parameters\n ----------\n **params : dict\n Estimator parameters.\n\n Returns\n -------\n self : estimator instance\n Estimator instance.\n \"\"\"\n if not params:\n # Simple optimization to gain speed (inspect is slow)\n return self\n valid_params = self.get_params(deep=True)\n\n nested_params = defaultdict(dict) # grouped by prefix\n for key, value in params.items():\n key, delim, sub_key = key.partition(\"__\")\n if key not in valid_params:\n local_valid_params = self._get_param_names()\n raise ValueError(\n f\"Invalid parameter {key!r} for estimator {self}. \"\n f\"Valid parameters are: {local_valid_params!r}.\"\n )\n\n if delim:\n nested_params[key][sub_key] = value\n else:\n setattr(self, key, value)\n valid_params[key] = value\n\n for key, sub_params in nested_params.items():\n # TODO(1.4): remove specific handling of \"base_estimator\".\n # The \"base_estimator\" key is special. It was deprecated and\n # renamed to \"estimator\" for several estimators. 
This means we\n # need to translate it here and set sub-parameters on \"estimator\",\n # but only if the user did not explicitly set a value for\n # \"base_estimator\".\n if (\n key == \"base_estimator\"\n and valid_params[key] == \"deprecated\"\n and self.__module__.startswith(\"sklearn.\")\n ):\n warnings.warn(\n (\n f\"Parameter 'base_estimator' of {self.__class__.__name__} is\"\n \" deprecated in favor of 'estimator'. See\"\n f\" {self.__class__.__name__}'s docstring for more details.\"\n ),\n FutureWarning,\n stacklevel=2,\n )\n key = \"estimator\"\n valid_params[key].set_params(**sub_params)\n\n return self\n\n def __sklearn_clone__(self):\n return _clone_parametrized(self)\n\n def __repr__(self, N_CHAR_MAX=700):\n # N_CHAR_MAX is the (approximate) maximum number of non-blank\n # characters to render. We pass it as an optional parameter to ease\n # the tests.\n\n from .utils._pprint import _EstimatorPrettyPrinter\n\n N_MAX_ELEMENTS_TO_SHOW = 30 # number of elements to show in sequences\n\n # use ellipsis for sequences with a lot of elements\n pp = _EstimatorPrettyPrinter(\n compact=True,\n indent=1,\n indent_at_name=True,\n n_max_elements_to_show=N_MAX_ELEMENTS_TO_SHOW,\n )\n\n repr_ = pp.pformat(self)\n\n # Use bruteforce ellipsis when there are a lot of non-blank characters\n n_nonblank = len(\"\".join(repr_.split()))\n if n_nonblank > N_CHAR_MAX:\n lim = N_CHAR_MAX // 2 # apprx number of chars to keep on both ends\n regex = r\"^(\\s*\\S){%d}\" % lim\n # The regex '^(\\s*\\S){%d}' % n\n # matches from the start of the string until the nth non-blank\n # character:\n # - ^ matches the start of string\n # - (pattern){n} matches n repetitions of pattern\n # - \\s*\\S matches a non-blank char following zero or more blanks\n left_lim = re.match(regex, repr_).end()\n right_lim = re.match(regex, repr_[::-1]).end()\n\n if \"\\n\" in repr_[left_lim:-right_lim]:\n # The left side and right side aren't on the same line.\n # To avoid weird cuts, e.g.:\n # categoric...ore',\n # we need to start the right side with an appropriate newline\n # character so that it renders properly as:\n # categoric...\n # handle_unknown='ignore',\n # so we add [^\\n]*\\n which matches until the next \\n\n regex += r\"[^\\n]*\\n\"\n right_lim = re.match(regex, repr_[::-1]).end()\n\n ellipsis = \"...\"\n if left_lim + len(ellipsis) < len(repr_) - right_lim:\n # Only add ellipsis if it results in a shorter repr\n repr_ = repr_[:left_lim] + \"...\" + repr_[-right_lim:]\n\n return repr_\n\n def __getstate__(self):\n if getattr(self, \"__slots__\", None):\n raise TypeError(\n \"You cannot use `__slots__` in objects inheriting from \"\n \"`sklearn.base.BaseEstimator`.\"\n )\n\n try:\n state = super().__getstate__()\n if state is None:\n # For Python 3.11+, empty instance (no `__slots__`,\n # and `__dict__`) will return a state equal to `None`.\n state = self.__dict__.copy()\n except AttributeError:\n # Python < 3.11\n state = self.__dict__.copy()\n\n if type(self).__module__.startswith(\"sklearn.\"):\n return dict(state.items(), _sklearn_version=__version__)\n else:\n return state\n\n def __setstate__(self, state):\n if type(self).__module__.startswith(\"sklearn.\"):\n pickle_version = state.pop(\"_sklearn_version\", \"pre-0.18\")\n if pickle_version != __version__:\n warnings.warn(\n InconsistentVersionWarning(\n estimator_name=self.__class__.__name__,\n current_sklearn_version=__version__,\n original_sklearn_version=pickle_version,\n ),\n )\n try:\n super().__setstate__(state)\n except AttributeError:\n 
self.__dict__.update(state)\n\n def _more_tags(self):\n return _DEFAULT_TAGS\n\n def _get_tags(self):\n collected_tags = {}\n for base_class in reversed(inspect.getmro(self.__class__)):\n if hasattr(base_class, \"_more_tags\"):\n # need the if because mixins might not have _more_tags\n # but might do redundant work in estimators\n # (i.e. calling more tags on BaseEstimator multiple times)\n more_tags = base_class._more_tags(self)\n collected_tags.update(more_tags)\n return collected_tags\n\n def _check_n_features(self, X, reset):\n \"\"\"Set the `n_features_in_` attribute, or check against it.\n\n Parameters\n ----------\n X : {ndarray, sparse matrix} of shape (n_samples, n_features)\n The input samples.\n reset : bool\n If True, the `n_features_in_` attribute is set to `X.shape[1]`.\n If False and the attribute exists, then check that it is equal to\n `X.shape[1]`. If False and the attribute does *not* exist, then\n the check is skipped.\n .. note::\n It is recommended to call reset=True in `fit` and in the first\n call to `partial_fit`. All other methods that validate `X`\n should set `reset=False`.\n \"\"\"\n try:\n n_features = _num_features(X)\n except TypeError as e:\n if not reset and hasattr(self, \"n_features_in_\"):\n raise ValueError(\n \"X does not contain any features, but \"\n f\"{self.__class__.__name__} is expecting \"\n f\"{self.n_features_in_} features\"\n ) from e\n # If the number of features is not defined and reset=True,\n # then we skip this check\n return\n\n if reset:\n self.n_features_in_ = n_features\n return\n\n if not hasattr(self, \"n_features_in_\"):\n # Skip this check if the expected number of expected input features\n # was not recorded by calling fit first. This is typically the case\n # for stateless transformers.\n return\n\n if n_features != self.n_features_in_:\n raise ValueError(\n f\"X has {n_features} features, but {self.__class__.__name__} \"\n f\"is expecting {self.n_features_in_} features as input.\"\n )\n\n def _check_feature_names(self, X, *, reset):\n \"\"\"Set or check the `feature_names_in_` attribute.\n\n .. versionadded:: 1.0\n\n Parameters\n ----------\n X : {ndarray, dataframe} of shape (n_samples, n_features)\n The input samples.\n\n reset : bool\n Whether to reset the `feature_names_in_` attribute.\n If False, the input will be checked for consistency with\n feature names of data provided when reset was last True.\n .. note::\n It is recommended to call `reset=True` in `fit` and in the first\n call to `partial_fit`. 
All other methods that validate `X`\n should set `reset=False`.\n \"\"\"\n\n if reset:\n feature_names_in = _get_feature_names(X)\n if feature_names_in is not None:\n self.feature_names_in_ = feature_names_in\n elif hasattr(self, \"feature_names_in_\"):\n # Delete the attribute when the estimator is fitted on a new dataset\n # that has no feature names.\n delattr(self, \"feature_names_in_\")\n return\n\n fitted_feature_names = getattr(self, \"feature_names_in_\", None)\n X_feature_names = _get_feature_names(X)\n\n if fitted_feature_names is None and X_feature_names is None:\n # no feature names seen in fit and in X\n return\n\n if X_feature_names is not None and fitted_feature_names is None:\n warnings.warn(\n f\"X has feature names, but {self.__class__.__name__} was fitted without\"\n \" feature names\"\n )\n return\n\n if X_feature_names is None and fitted_feature_names is not None:\n warnings.warn(\n \"X does not have valid feature names, but\"\n f\" {self.__class__.__name__} was fitted with feature names\"\n )\n return\n\n # validate the feature names against the `feature_names_in_` attribute\n if len(fitted_feature_names) != len(X_feature_names) or np.any(\n fitted_feature_names != X_feature_names\n ):\n message = (\n \"The feature names should match those that were passed during fit.\\n\"\n )\n fitted_feature_names_set = set(fitted_feature_names)\n X_feature_names_set = set(X_feature_names)\n\n unexpected_names = sorted(X_feature_names_set - fitted_feature_names_set)\n missing_names = sorted(fitted_feature_names_set - X_feature_names_set)\n\n def add_names(names):\n output = \"\"\n max_n_names = 5\n for i, name in enumerate(names):\n if i >= max_n_names:\n output += \"- ...\\n\"\n break\n output += f\"- {name}\\n\"\n return output\n\n if unexpected_names:\n message += \"Feature names unseen at fit time:\\n\"\n message += add_names(unexpected_names)\n\n if missing_names:\n message += \"Feature names seen at fit time, yet now missing:\\n\"\n message += add_names(missing_names)\n\n if not missing_names and not unexpected_names:\n message += (\n \"Feature names must be in the same order as they were in fit.\\n\"\n )\n\n raise ValueError(message)\n\n def _validate_data(\n self,\n X=\"no_validation\",\n y=\"no_validation\",\n reset=True,\n validate_separately=False,\n cast_to_ndarray=True,\n **check_params,\n ):\n \"\"\"Validate input data and set or check the `n_features_in_` attribute.\n\n Parameters\n ----------\n X : {array-like, sparse matrix, dataframe} of shape \\\n (n_samples, n_features), default='no validation'\n The input samples.\n If `'no_validation'`, no validation is performed on `X`. This is\n useful for meta-estimator which can delegate input validation to\n their underlying estimator(s). In that case `y` must be passed and\n the only accepted `check_params` are `multi_output` and\n `y_numeric`.\n\n y : array-like of shape (n_samples,), default='no_validation'\n The targets.\n\n - If `None`, `check_array` is called on `X`. If the estimator's\n requires_y tag is True, then an error will be raised.\n - If `'no_validation'`, `check_array` is called on `X` and the\n estimator's requires_y tag is ignored. This is a default\n placeholder and is never meant to be explicitly set. 
In that case\n `X` must be passed.\n - Otherwise, only `y` with `_check_y` or both `X` and `y` are\n checked with either `check_array` or `check_X_y` depending on\n `validate_separately`.\n\n reset : bool, default=True\n Whether to reset the `n_features_in_` attribute.\n If False, the input will be checked for consistency with data\n provided when reset was last True.\n .. note::\n It is recommended to call reset=True in `fit` and in the first\n call to `partial_fit`. All other methods that validate `X`\n should set `reset=False`.\n\n validate_separately : False or tuple of dicts, default=False\n Only used if y is not None.\n If False, call validate_X_y(). Else, it must be a tuple of kwargs\n to be used for calling check_array() on X and y respectively.\n\n `estimator=self` is automatically added to these dicts to generate\n more informative error message in case of invalid input data.\n\n cast_to_ndarray : bool, default=True\n Cast `X` and `y` to ndarray with checks in `check_params`. If\n `False`, `X` and `y` are unchanged and only `feature_names_in_` and\n `n_features_in_` are checked.\n\n **check_params : kwargs\n Parameters passed to :func:`sklearn.utils.check_array` or\n :func:`sklearn.utils.check_X_y`. Ignored if validate_separately\n is not False.\n\n `estimator=self` is automatically added to these params to generate\n more informative error message in case of invalid input data.\n\n Returns\n -------\n out : {ndarray, sparse matrix} or tuple of these\n The validated input. A tuple is returned if both `X` and `y` are\n validated.\n \"\"\"\n self._check_feature_names(X, reset=reset)\n\n if y is None and self._get_tags()[\"requires_y\"]:\n raise ValueError(\n f\"This {self.__class__.__name__} estimator \"\n \"requires y to be passed, but the target y is None.\"\n )\n\n no_val_X = isinstance(X, str) and X == \"no_validation\"\n no_val_y = y is None or isinstance(y, str) and y == \"no_validation\"\n\n if no_val_X and no_val_y:\n raise ValueError(\"Validation should be done on X, y or both.\")\n\n default_check_params = {\"estimator\": self}\n check_params = {**default_check_params, **check_params}\n\n if not cast_to_ndarray:\n if not no_val_X and no_val_y:\n out = X\n elif no_val_X and not no_val_y:\n out = y\n else:\n out = X, y\n elif not no_val_X and no_val_y:\n out = check_array(X, input_name=\"X\", **check_params)\n elif no_val_X and not no_val_y:\n out = _check_y(y, **check_params)\n else:\n if validate_separately:\n # We need this because some estimators validate X and y\n # separately, and in general, separately calling check_array()\n # on X and y isn't equivalent to just calling check_X_y()\n # :(\n check_X_params, check_y_params = validate_separately\n if \"estimator\" not in check_X_params:\n check_X_params = {**default_check_params, **check_X_params}\n X = check_array(X, input_name=\"X\", **check_X_params)\n if \"estimator\" not in check_y_params:\n check_y_params = {**default_check_params, **check_y_params}\n y = check_array(y, input_name=\"y\", **check_y_params)\n else:\n X, y = check_X_y(X, y, **check_params)\n out = X, y\n\n if not no_val_X and check_params.get(\"ensure_2d\", True):\n self._check_n_features(X, reset=reset)\n\n return out\n\n def _validate_params(self):\n \"\"\"Validate types and values of constructor parameters\n\n The expected type and values must be defined in the `_parameter_constraints`\n class attribute, which is a dictionary `param_name: list of constraints`. 
See\n the docstring of `validate_parameter_constraints` for a description of the\n accepted constraints.\n \"\"\"\n validate_parameter_constraints(\n self._parameter_constraints,\n self.get_params(deep=False),\n caller_name=self.__class__.__name__,\n )\n\n @property\n def _repr_html_(self):\n \"\"\"HTML representation of estimator.\n\n This is redundant with the logic of `_repr_mimebundle_`. The latter\n should be favorted in the long term, `_repr_html_` is only\n implemented for consumers who do not interpret `_repr_mimbundle_`.\n \"\"\"\n if get_config()[\"display\"] != \"diagram\":\n raise AttributeError(\n \"_repr_html_ is only defined when the \"\n \"'display' configuration option is set to \"\n \"'diagram'\"\n )\n return self._repr_html_inner\n\n def _repr_html_inner(self):\n \"\"\"This function is returned by the @property `_repr_html_` to make\n `hasattr(estimator, \"_repr_html_\") return `True` or `False` depending\n on `get_config()[\"display\"]`.\n \"\"\"\n return estimator_html_repr(self)\n\n def _repr_mimebundle_(self, **kwargs):\n \"\"\"Mime bundle used by jupyter kernels to display estimator\"\"\"\n output = {\"text/plain\": repr(self)}\n if get_config()[\"display\"] == \"diagram\":\n output[\"text/html\"] = estimator_html_repr(self)\n return output" }, { "identifier": "ClusterMixin", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/base.py", "snippet": "class ClusterMixin:\n \"\"\"Mixin class for all cluster estimators in scikit-learn.\"\"\"\n\n _estimator_type = \"clusterer\"\n\n def fit_predict(self, X, y=None):\n \"\"\"\n Perform clustering on `X` and returns cluster labels.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Input data.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n labels : ndarray of shape (n_samples,), dtype=np.int64\n Cluster labels.\n \"\"\"\n # non-optimized default implementation; override when a better\n # method is possible for a given clustering algorithm\n self.fit(X)\n return self.labels_\n\n def _more_tags(self):\n return {\"preserves_dtype\": []}" }, { "identifier": "_fit_context", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/base.py", "snippet": "def _fit_context(*, prefer_skip_nested_validation):\n \"\"\"Decorator to run the fit methods of estimators within context managers.\n\n Parameters\n ----------\n prefer_skip_nested_validation : bool\n If True, the validation of parameters of inner estimators or functions\n called during fit will be skipped.\n\n This is useful to avoid validating many times the parameters passed by the\n user from the public facing API. 
It's also useful to avoid validating\n parameters that we pass internally to inner functions that are guaranteed to\n be valid by the test suite.\n\n It should be set to True for most estimators, except for those that receive\n non-validated objects as parameters, such as meta-estimators that are given\n estimator objects.\n\n Returns\n -------\n decorated_fit : method\n The decorated fit method.\n \"\"\"\n\n def decorator(fit_method):\n @functools.wraps(fit_method)\n def wrapper(estimator, *args, **kwargs):\n global_skip_validation = get_config()[\"skip_parameter_validation\"]\n\n # we don't want to validate again for each call to partial_fit\n partial_fit_and_fitted = (\n fit_method.__name__ == \"partial_fit\" and _is_fitted(estimator)\n )\n\n if not global_skip_validation and not partial_fit_and_fitted:\n estimator._validate_params()\n\n with config_context(\n skip_parameter_validation=(\n prefer_skip_nested_validation or global_skip_validation\n )\n ):\n return fit_method(estimator, *args, **kwargs)\n\n return wrapper\n\n return decorator" }, { "identifier": "_VALID_METRICS", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/metrics/pairwise.py", "snippet": "_VALID_METRICS = [\n \"euclidean\",\n \"l2\",\n \"l1\",\n \"manhattan\",\n \"cityblock\",\n \"braycurtis\",\n \"canberra\",\n \"chebyshev\",\n \"correlation\",\n \"cosine\",\n \"dice\",\n \"hamming\",\n \"jaccard\",\n \"mahalanobis\",\n \"matching\",\n \"minkowski\",\n \"rogerstanimoto\",\n \"russellrao\",\n \"seuclidean\",\n \"sokalmichener\",\n \"sokalsneath\",\n \"sqeuclidean\",\n \"yule\",\n \"wminkowski\",\n \"nan_euclidean\",\n \"haversine\",\n]" }, { "identifier": "NearestNeighbors", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/neighbors/_unsupervised.py", "snippet": "class NearestNeighbors(KNeighborsMixin, RadiusNeighborsMixin, NeighborsBase):\n \"\"\"Unsupervised learner for implementing neighbor searches.\n\n Read more in the :ref:`User Guide <unsupervised_neighbors>`.\n\n .. versionadded:: 0.9\n\n Parameters\n ----------\n n_neighbors : int, default=5\n Number of neighbors to use by default for :meth:`kneighbors` queries.\n\n radius : float, default=1.0\n Range of parameter space to use by default for :meth:`radius_neighbors`\n queries.\n\n algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'\n Algorithm used to compute the nearest neighbors:\n\n - 'ball_tree' will use :class:`BallTree`\n - 'kd_tree' will use :class:`KDTree`\n - 'brute' will use a brute-force search.\n - 'auto' will attempt to decide the most appropriate algorithm\n based on the values passed to :meth:`fit` method.\n\n Note: fitting on sparse input will override the setting of\n this parameter, using brute force.\n\n leaf_size : int, default=30\n Leaf size passed to BallTree or KDTree. This can affect the\n speed of the construction and query, as well as the memory\n required to store the tree. The optimal value depends on the\n nature of the problem.\n\n metric : str or callable, default='minkowski'\n Metric to use for distance computation. Default is \"minkowski\", which\n results in the standard Euclidean distance when p = 2. See the\n documentation of `scipy.spatial.distance\n <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and\n the metrics listed in\n :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric\n values.\n\n If metric is \"precomputed\", X is assumed to be a distance matrix and\n must be square during fit. 
X may be a :term:`sparse graph`, in which\n case only \"nonzero\" elements may be considered neighbors.\n\n If metric is a callable function, it takes two arrays representing 1D\n vectors as inputs and must return one value indicating the distance\n between those vectors. This works for Scipy's metrics, but is less\n efficient than passing the metric name as a string.\n\n p : float, default=2\n Parameter for the Minkowski metric from\n sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is\n equivalent to using manhattan_distance (l1), and euclidean_distance\n (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.\n\n metric_params : dict, default=None\n Additional keyword arguments for the metric function.\n\n n_jobs : int, default=None\n The number of parallel jobs to run for neighbors search.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n Attributes\n ----------\n effective_metric_ : str\n Metric used to compute distances to neighbors.\n\n effective_metric_params_ : dict\n Parameters for the metric used to compute distances to neighbors.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n n_samples_fit_ : int\n Number of samples in the fitted data.\n\n See Also\n --------\n KNeighborsClassifier : Classifier implementing the k-nearest neighbors\n vote.\n RadiusNeighborsClassifier : Classifier implementing a vote among neighbors\n within a given radius.\n KNeighborsRegressor : Regression based on k-nearest neighbors.\n RadiusNeighborsRegressor : Regression based on neighbors within a fixed\n radius.\n BallTree : Space partitioning data structure for organizing points in a\n multi-dimensional space, used for nearest neighbor search.\n\n Notes\n -----\n See :ref:`Nearest Neighbors <neighbors>` in the online documentation\n for a discussion of the choice of ``algorithm`` and ``leaf_size``.\n\n https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.neighbors import NearestNeighbors\n >>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]\n >>> neigh = NearestNeighbors(n_neighbors=2, radius=0.4)\n >>> neigh.fit(samples)\n NearestNeighbors(...)\n >>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)\n array([[2, 0]]...)\n >>> nbrs = neigh.radius_neighbors(\n ... [[0, 0, 1.3]], 0.4, return_distance=False\n ... 
)\n >>> np.asarray(nbrs[0][0])\n array(2)\n \"\"\"\n\n def __init__(\n self,\n *,\n n_neighbors=5,\n radius=1.0,\n algorithm=\"auto\",\n leaf_size=30,\n metric=\"minkowski\",\n p=2,\n metric_params=None,\n n_jobs=None,\n ):\n super().__init__(\n n_neighbors=n_neighbors,\n radius=radius,\n algorithm=algorithm,\n leaf_size=leaf_size,\n metric=metric,\n p=p,\n metric_params=metric_params,\n n_jobs=n_jobs,\n )\n\n @_fit_context(\n # NearestNeighbors.metric is not validated yet\n prefer_skip_nested_validation=False\n )\n def fit(self, X, y=None):\n \"\"\"Fit the nearest neighbors estimator from the training dataset.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features) or \\\n (n_samples, n_samples) if metric='precomputed'\n Training data.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : NearestNeighbors\n The fitted nearest neighbors estimator.\n \"\"\"\n return self._fit(X)" }, { "identifier": "Interval", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/_param_validation.py", "snippet": "class Interval(_Constraint):\n \"\"\"Constraint representing a typed interval.\n\n Parameters\n ----------\n type : {numbers.Integral, numbers.Real, RealNotInt}\n The set of numbers in which to set the interval.\n\n If RealNotInt, only reals that don't have the integer type\n are allowed. For example 1.0 is allowed but 1 is not.\n\n left : float or int or None\n The left bound of the interval. None means left bound is -∞.\n\n right : float, int or None\n The right bound of the interval. None means right bound is +∞.\n\n closed : {\"left\", \"right\", \"both\", \"neither\"}\n Whether the interval is open or closed. Possible choices are:\n\n - `\"left\"`: the interval is closed on the left and open on the right.\n It is equivalent to the interval `[ left, right )`.\n - `\"right\"`: the interval is closed on the right and open on the left.\n It is equivalent to the interval `( left, right ]`.\n - `\"both\"`: the interval is closed.\n It is equivalent to the interval `[ left, right ]`.\n - `\"neither\"`: the interval is open.\n It is equivalent to the interval `( left, right )`.\n\n Notes\n -----\n Setting a bound to `None` and setting the interval closed is valid. For instance,\n strictly speaking, `Interval(Real, 0, None, closed=\"both\")` corresponds to\n `[0, +∞) U {+∞}`.\n \"\"\"\n\n def __init__(self, type, left, right, *, closed):\n super().__init__()\n self.type = type\n self.left = left\n self.right = right\n self.closed = closed\n\n self._check_params()\n\n def _check_params(self):\n if self.type not in (Integral, Real, RealNotInt):\n raise ValueError(\n \"type must be either numbers.Integral, numbers.Real or RealNotInt.\"\n f\" Got {self.type} instead.\"\n )\n\n if self.closed not in (\"left\", \"right\", \"both\", \"neither\"):\n raise ValueError(\n \"closed must be either 'left', 'right', 'both' or 'neither'. 
\"\n f\"Got {self.closed} instead.\"\n )\n\n if self.type is Integral:\n suffix = \"for an interval over the integers.\"\n if self.left is not None and not isinstance(self.left, Integral):\n raise TypeError(f\"Expecting left to be an int {suffix}\")\n if self.right is not None and not isinstance(self.right, Integral):\n raise TypeError(f\"Expecting right to be an int {suffix}\")\n if self.left is None and self.closed in (\"left\", \"both\"):\n raise ValueError(\n f\"left can't be None when closed == {self.closed} {suffix}\"\n )\n if self.right is None and self.closed in (\"right\", \"both\"):\n raise ValueError(\n f\"right can't be None when closed == {self.closed} {suffix}\"\n )\n else:\n if self.left is not None and not isinstance(self.left, Real):\n raise TypeError(\"Expecting left to be a real number.\")\n if self.right is not None and not isinstance(self.right, Real):\n raise TypeError(\"Expecting right to be a real number.\")\n\n if self.right is not None and self.left is not None and self.right <= self.left:\n raise ValueError(\n f\"right can't be less than left. Got left={self.left} and \"\n f\"right={self.right}\"\n )\n\n def __contains__(self, val):\n if np.isnan(val):\n return False\n\n left_cmp = operator.lt if self.closed in (\"left\", \"both\") else operator.le\n right_cmp = operator.gt if self.closed in (\"right\", \"both\") else operator.ge\n\n left = -np.inf if self.left is None else self.left\n right = np.inf if self.right is None else self.right\n\n if left_cmp(val, left):\n return False\n if right_cmp(val, right):\n return False\n return True\n\n def is_satisfied_by(self, val):\n if not isinstance(val, self.type):\n return False\n\n return val in self\n\n def __str__(self):\n type_str = \"an int\" if self.type is Integral else \"a float\"\n left_bracket = \"[\" if self.closed in (\"left\", \"both\") else \"(\"\n left_bound = \"-inf\" if self.left is None else self.left\n right_bound = \"inf\" if self.right is None else self.right\n right_bracket = \"]\" if self.closed in (\"right\", \"both\") else \")\"\n\n # better repr if the bounds were given as integers\n if not self.type == Integral and isinstance(self.left, Real):\n left_bound = float(left_bound)\n if not self.type == Integral and isinstance(self.right, Real):\n right_bound = float(right_bound)\n\n return (\n f\"{type_str} in the range \"\n f\"{left_bracket}{left_bound}, {right_bound}{right_bracket}\"\n )" }, { "identifier": "StrOptions", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/_param_validation.py", "snippet": "class StrOptions(Options):\n \"\"\"Constraint representing a finite set of strings.\n\n Parameters\n ----------\n options : set of str\n The set of valid strings.\n\n deprecated : set of str or None, default=None\n A subset of the `options` to mark as deprecated in the string\n representation of the constraint.\n \"\"\"\n\n def __init__(self, options, *, deprecated=None):\n super().__init__(type=str, options=options, deprecated=deprecated)" }, { "identifier": "_check_sample_weight", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/validation.py", "snippet": "def _check_sample_weight(\n sample_weight, X, dtype=None, copy=False, only_non_negative=False\n):\n \"\"\"Validate sample weights.\n\n Note that passing sample_weight=None will output an array of ones.\n Therefore, in some cases, you may want to protect the call with:\n if sample_weight is not None:\n sample_weight = _check_sample_weight(...)\n\n Parameters\n ----------\n sample_weight : {ndarray, Number or None}, 
shape (n_samples,)\n Input sample weights.\n\n X : {ndarray, list, sparse matrix}\n Input data.\n\n only_non_negative : bool, default=False,\n Whether or not the weights are expected to be non-negative.\n\n .. versionadded:: 1.0\n\n dtype : dtype, default=None\n dtype of the validated `sample_weight`.\n If None, and the input `sample_weight` is an array, the dtype of the\n input is preserved; otherwise an array with the default numpy dtype\n is be allocated. If `dtype` is not one of `float32`, `float64`,\n `None`, the output will be of dtype `float64`.\n\n copy : bool, default=False\n If True, a copy of sample_weight will be created.\n\n Returns\n -------\n sample_weight : ndarray of shape (n_samples,)\n Validated sample weight. It is guaranteed to be \"C\" contiguous.\n \"\"\"\n n_samples = _num_samples(X)\n\n if dtype is not None and dtype not in [np.float32, np.float64]:\n dtype = np.float64\n\n if sample_weight is None:\n sample_weight = np.ones(n_samples, dtype=dtype)\n elif isinstance(sample_weight, numbers.Number):\n sample_weight = np.full(n_samples, sample_weight, dtype=dtype)\n else:\n if dtype is None:\n dtype = [np.float64, np.float32]\n sample_weight = check_array(\n sample_weight,\n accept_sparse=False,\n ensure_2d=False,\n dtype=dtype,\n order=\"C\",\n copy=copy,\n input_name=\"sample_weight\",\n )\n if sample_weight.ndim != 1:\n raise ValueError(\"Sample weights must be 1D array or scalar\")\n\n if sample_weight.shape != (n_samples,):\n raise ValueError(\n \"sample_weight.shape == {}, expected {}!\".format(\n sample_weight.shape, (n_samples,)\n )\n )\n\n if only_non_negative:\n check_non_negative(sample_weight, \"`sample_weight`\")\n\n return sample_weight" } ]
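The context above closes with scikit-learn's private parameter-validation helpers. As an illustrative aside (not part of the record), the Interval and StrOptions constraints captured there can be exercised directly; the import path below mirrors the snippet's file location, the example bounds and option set are invented, and is_satisfied_by on StrOptions is assumed to behave as in its parent Options class.

# Illustrative sketch only: exercising the constraint classes from the context above.
from numbers import Integral
from sklearn.utils._param_validation import Interval, StrOptions

# Integer interval closed on the left: accepts any integer >= 1 (a min_samples-like parameter).
min_samples_constraint = Interval(Integral, 1, None, closed="left")
print(2 in min_samples_constraint)    # True  -- via Interval.__contains__ from the snippet
print(0 in min_samples_constraint)    # False -- below the closed left bound

# Finite set of accepted strings (an algorithm-like parameter).
algorithm_constraint = StrOptions({"auto", "ball_tree", "kd_tree", "brute"})
print(algorithm_constraint.is_satisfied_by("auto"))  # True, assuming Options.is_satisfied_by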
import warnings
import numpy as np
from numbers import Integral, Real
from scipy import sparse
from ..base import BaseEstimator, ClusterMixin, _fit_context
from ..metrics.pairwise import _VALID_METRICS
from ..neighbors import NearestNeighbors
from ..utils._param_validation import Interval, StrOptions
from ..utils.validation import _check_sample_weight
from ._dbscan_inner import dbscan_inner
11087
eps : float, default=0.5 The maximum distance between two samples for one to be considered as in the neighborhood of the other. This is not a maximum bound on the distances of points within a cluster. This is the most important DBSCAN parameter to choose appropriately for your data set and distance function. min_samples : int, default=5 The number of samples (or total weight) in a neighborhood for a point to be considered as a core point. This includes the point itself. metric : str or callable, default='minkowski' The metric to use when calculating distance between instances in a feature array. If metric is a string or callable, it must be one of the options allowed by :func:`sklearn.metrics.pairwise_distances` for its metric parameter. If metric is "precomputed", X is assumed to be a distance matrix and must be square during fit. X may be a :term:`sparse graph <sparse graph>`, in which case only "nonzero" elements may be considered neighbors. metric_params : dict, default=None Additional keyword arguments for the metric function. .. versionadded:: 0.19 algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' The algorithm to be used by the NearestNeighbors module to compute pointwise distances and find nearest neighbors. See NearestNeighbors module documentation for details. leaf_size : int, default=30 Leaf size passed to BallTree or cKDTree. This can affect the speed of the construction and query, as well as the memory required to store the tree. The optimal value depends on the nature of the problem. p : float, default=2 The power of the Minkowski metric to be used to calculate distance between points. sample_weight : array-like of shape (n_samples,), default=None Weight of each sample, such that a sample with a weight of at least ``min_samples`` is by itself a core sample; a sample with negative weight may inhibit its eps-neighbor from being core. Note that weights are absolute, and default to 1. n_jobs : int, default=None The number of parallel jobs to run for neighbors search. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. If precomputed distance are used, parallel execution is not available and thus n_jobs will have no effect. Returns ------- core_samples : ndarray of shape (n_core_samples,) Indices of core samples. labels : ndarray of shape (n_samples,) Cluster labels for each point. Noisy samples are given the label -1. See Also -------- DBSCAN : An estimator interface for this clustering algorithm. OPTICS : A similar estimator interface clustering at multiple values of eps. Our implementation is optimized for memory usage. Notes ----- For an example, see :ref:`examples/cluster/plot_dbscan.py <sphx_glr_auto_examples_cluster_plot_dbscan.py>`. This implementation bulk-computes all neighborhood queries, which increases the memory complexity to O(n.d) where d is the average number of neighbors, while original DBSCAN had memory complexity O(n). It may attract a higher memory complexity when querying these nearest neighborhoods, depending on the ``algorithm``. One way to avoid the query complexity is to pre-compute sparse neighborhoods in chunks using :func:`NearestNeighbors.radius_neighbors_graph <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with ``mode='distance'``, then using ``metric='precomputed'`` here. Another way to reduce memory and computation time is to remove (near-)duplicate points and use ``sample_weight`` instead. 
:class:`~sklearn.cluster.OPTICS` provides a similar clustering with lower memory usage. References ---------- Ester, M., H. P. Kriegel, J. Sander, and X. Xu, `"A Density-Based Algorithm for Discovering Clusters in Large Spatial Databases with Noise" <https://www.dbs.ifi.lmu.de/Publikationen/Papers/KDD-96.final.frame.pdf>`_. In: Proceedings of the 2nd International Conference on Knowledge Discovery and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996 Schubert, E., Sander, J., Ester, M., Kriegel, H. P., & Xu, X. (2017). :doi:`"DBSCAN revisited, revisited: why and how you should (still) use DBSCAN." <10.1145/3068335>` ACM Transactions on Database Systems (TODS), 42(3), 19. """ est = DBSCAN( eps=eps, min_samples=min_samples, metric=metric, metric_params=metric_params, algorithm=algorithm, leaf_size=leaf_size, p=p, n_jobs=n_jobs, ) est.fit(X, sample_weight=sample_weight) return est.core_sample_indices_, est.labels_
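For orientation (illustrative only, not part of the record): the cropped code above delegates to the DBSCAN estimator and returns core sample indices plus per-point labels. A minimal call to that module-level helper, with an invented toy array and assuming a standard scikit-learn installation, might look like this:

# Minimal usage sketch of the dbscan() helper shown above; the data is made up.
import numpy as np
from sklearn.cluster import dbscan  # public re-export of the function defined in _dbscan.py

X = np.array([[1.0, 2.0], [2.0, 2.0], [2.0, 3.0],
              [8.0, 7.0], [8.0, 8.0], [25.0, 80.0]])
core_samples, labels = dbscan(X, eps=3.0, min_samples=2)
print(core_samples)  # indices of core samples
print(labels)        # one cluster id per point; noise points are labelled -1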
""" DBSCAN: Density-Based Spatial Clustering of Applications with Noise """ # Author: Robert Layton <[email protected]> # Joel Nothman <[email protected]> # Lars Buitinck # # License: BSD 3 clause def dbscan( X, eps=0.5, *, min_samples=5, metric="minkowski", metric_params=None, algorithm="auto", leaf_size=30, p=2, sample_weight=None, n_jobs=None, ): """Perform DBSCAN clustering from vector array or distance matrix. Read more in the :ref:`User Guide <dbscan>`. Parameters ---------- X : {array-like, sparse (CSR) matrix} of shape (n_samples, n_features) or \ (n_samples, n_samples) A feature array, or array of distances between samples if ``metric='precomputed'``. eps : float, default=0.5 The maximum distance between two samples for one to be considered as in the neighborhood of the other. This is not a maximum bound on the distances of points within a cluster. This is the most important DBSCAN parameter to choose appropriately for your data set and distance function. min_samples : int, default=5 The number of samples (or total weight) in a neighborhood for a point to be considered as a core point. This includes the point itself. metric : str or callable, default='minkowski' The metric to use when calculating distance between instances in a feature array. If metric is a string or callable, it must be one of the options allowed by :func:`sklearn.metrics.pairwise_distances` for its metric parameter. If metric is "precomputed", X is assumed to be a distance matrix and must be square during fit. X may be a :term:`sparse graph <sparse graph>`, in which case only "nonzero" elements may be considered neighbors. metric_params : dict, default=None Additional keyword arguments for the metric function. .. versionadded:: 0.19 algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' The algorithm to be used by the NearestNeighbors module to compute pointwise distances and find nearest neighbors. See NearestNeighbors module documentation for details. leaf_size : int, default=30 Leaf size passed to BallTree or cKDTree. This can affect the speed of the construction and query, as well as the memory required to store the tree. The optimal value depends on the nature of the problem. p : float, default=2 The power of the Minkowski metric to be used to calculate distance between points. sample_weight : array-like of shape (n_samples,), default=None Weight of each sample, such that a sample with a weight of at least ``min_samples`` is by itself a core sample; a sample with negative weight may inhibit its eps-neighbor from being core. Note that weights are absolute, and default to 1. n_jobs : int, default=None The number of parallel jobs to run for neighbors search. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. If precomputed distance are used, parallel execution is not available and thus n_jobs will have no effect. Returns ------- core_samples : ndarray of shape (n_core_samples,) Indices of core samples. labels : ndarray of shape (n_samples,) Cluster labels for each point. Noisy samples are given the label -1. See Also -------- DBSCAN : An estimator interface for this clustering algorithm. OPTICS : A similar estimator interface clustering at multiple values of eps. Our implementation is optimized for memory usage. Notes ----- For an example, see :ref:`examples/cluster/plot_dbscan.py <sphx_glr_auto_examples_cluster_plot_dbscan.py>`. 
This implementation bulk-computes all neighborhood queries, which increases the memory complexity to O(n.d) where d is the average number of neighbors, while original DBSCAN had memory complexity O(n). It may attract a higher memory complexity when querying these nearest neighborhoods, depending on the ``algorithm``. One way to avoid the query complexity is to pre-compute sparse neighborhoods in chunks using :func:`NearestNeighbors.radius_neighbors_graph <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with ``mode='distance'``, then using ``metric='precomputed'`` here. Another way to reduce memory and computation time is to remove (near-)duplicate points and use ``sample_weight`` instead. :class:`~sklearn.cluster.OPTICS` provides a similar clustering with lower memory usage. References ---------- Ester, M., H. P. Kriegel, J. Sander, and X. Xu, `"A Density-Based Algorithm for Discovering Clusters in Large Spatial Databases with Noise" <https://www.dbs.ifi.lmu.de/Publikationen/Papers/KDD-96.final.frame.pdf>`_. In: Proceedings of the 2nd International Conference on Knowledge Discovery and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996 Schubert, E., Sander, J., Ester, M., Kriegel, H. P., & Xu, X. (2017). :doi:`"DBSCAN revisited, revisited: why and how you should (still) use DBSCAN." <10.1145/3068335>` ACM Transactions on Database Systems (TODS), 42(3), 19. """ est = DBSCAN( eps=eps, min_samples=min_samples, metric=metric, metric_params=metric_params, algorithm=algorithm, leaf_size=leaf_size, p=p, n_jobs=n_jobs, ) est.fit(X, sample_weight=sample_weight) return est.core_sample_indices_, est.labels_
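The Notes above suggest folding (near-)duplicate points into weighted samples instead of repeating them. A short sketch of that idea (illustrative only; the points and weights are invented):

# Sketch of the sample_weight idea from the Notes: one weighted row stands in for duplicates.
import numpy as np
from sklearn.cluster import dbscan

X = np.array([[0.0, 0.0], [0.5, 0.0], [10.0, 10.0]])
weights = np.array([3.0, 1.0, 1.0])  # the first row represents three identical samples

core_samples, labels = dbscan(X, eps=1.0, min_samples=3, sample_weight=weights)
print(core_samples, labels)  # expectation: rows 0 and 1 form one cluster, row 2 is noise (-1)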
class DBSCAN(ClusterMixin, BaseEstimator):
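The next_line declares DBSCAN on top of ClusterMixin and BaseEstimator, whose snippets appear in the context above. A small sketch of what those bases contribute (illustrative only; the random data is invented):

# get_params/set_params come from BaseEstimator, fit_predict from ClusterMixin.
import numpy as np
from sklearn.cluster import DBSCAN

est = DBSCAN(eps=0.5, min_samples=5)
print(est.get_params()["eps"])   # 0.5
est.set_params(eps=1.0)          # set_params validates the name against get_params()
labels = est.fit_predict(np.random.RandomState(0).rand(20, 2))  # fit, then return labels_
print(labels.shape)              # (20,)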
0
2023-10-07 13:19:48+00:00
16k
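Closing out this record: its Notes also mention pre-computing sparse neighborhoods with NearestNeighbors.radius_neighbors_graph(mode='distance') and then clustering with metric='precomputed'. A sketch of that recipe (illustrative only; data, radius and shapes are invented, and the exact handling of self-distances is left to the DBSCAN implementation):

# Memory-saving recipe from the record's Notes: sparse radius graph + precomputed metric.
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn.neighbors import NearestNeighbors

X = np.random.RandomState(0).rand(200, 3)
eps = 0.3

graph = (
    NearestNeighbors(radius=eps)
    .fit(X)
    .radius_neighbors_graph(X, mode="distance")  # sparse CSR of within-eps distances
)
labels = DBSCAN(eps=eps, metric="precomputed").fit_predict(graph)
print(np.unique(labels))  # cluster ids; -1 marks noise if present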
hellloxiaotian/KDNet
test_ccpd.py
[ { "identifier": "attempt_load", "path": "models/experimental.py", "snippet": "def attempt_load(weights, map_location=None):\n # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a\n model = Ensemble()\n # print('weights', weights) # /runs/train/yolov7_distillation19/weights/epoch_074.pt\n for w in weights if isinstance(weights, list) else [weights]:\n # attempt_download(w) # /runs/train/yolov7_distillation19/weights/epoch_074.pt\n ckpt = torch.load(w, map_location=map_location) # load\n model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model\n \n # Compatibility updates\n for m in model.modules():\n if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:\n m.inplace = True # pytorch 1.7.0 compatibility\n elif type(m) is nn.Upsample:\n m.recompute_scale_factor = None # torch 1.11.0 compatibility\n elif type(m) is Conv:\n m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility\n \n if len(model) == 1:\n return model[-1] # return model\n else:\n print('Ensemble created with %s\\n' % weights)\n for k in ['names', 'stride']:\n setattr(model, k, getattr(model[-1], k))\n return model # return ensemble" }, { "identifier": "create_dataloader", "path": "utils/datasets.py", "snippet": "def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,\n rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''):\n # Make sure only the first process in DDP process the dataset first, and the following others can use the cache\n with torch_distributed_zero_first(rank):\n dataset = LoadImagesAndLabels(path, imgsz, batch_size,\n augment=augment, # augment images\n hyp=hyp, # augmentation hyperparameters\n rect=rect, # rectangular training\n cache_images=cache,\n single_cls=opt.single_cls,\n stride=int(stride),\n pad=pad,\n image_weights=image_weights,\n prefix=prefix)\n\n batch_size = min(batch_size, len(dataset))\n nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers\n sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None\n loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader\n # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader()\n dataloader = loader(dataset,\n batch_size=batch_size,\n num_workers=nw,\n sampler=sampler,\n pin_memory=True,\n collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)\n return dataloader, dataset" }, { "identifier": "coco80_to_coco91_class", "path": "utils/general.py", "snippet": "def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)\n # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/\n # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\\n')\n # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\\n')\n # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco\n # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet\n x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,\n 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,\n 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]\n return x" }, { 
"identifier": "check_dataset", "path": "utils/general.py", "snippet": "def check_dataset(dict):\n # Download dataset if not found locally\n val, s = dict.get('val'), dict.get('download')\n if val and len(val):\n val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path\n if not all(x.exists() for x in val):\n print('\\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()])\n if s and len(s): # download script\n print('Downloading %s ...' % s)\n if s.startswith('http') and s.endswith('.zip'): # URL\n f = Path(s).name # filename\n torch.hub.download_url_to_file(s, f)\n r = os.system('unzip -q %s -d ../ && rm %s' % (f, f)) # unzip\n else: # bash script\n r = os.system(s)\n print('Dataset autodownload %s\\n' % ('success' if r == 0 else 'failure')) # analyze return value\n else:\n raise Exception('Dataset not found.')" }, { "identifier": "check_file", "path": "utils/general.py", "snippet": "def check_file(file):\n # Search for file if not found\n if Path(file).is_file() or file == '':\n return file\n else:\n files = glob.glob('./**/' + file, recursive=True) # find file\n assert len(files), f'File Not Found: {file}' # assert file was found\n assert len(files) == 1, f\"Multiple files match '{file}', specify exact path: {files}\" # assert unique\n return files[0] # return file" }, { "identifier": "check_img_size", "path": "utils/general.py", "snippet": "def check_img_size(img_size, s=32):\n # Verify img_size is a multiple of stride s\n new_size = make_divisible(img_size, int(s)) # ceil gs-multiple\n if new_size != img_size:\n print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size))\n return new_size" }, { "identifier": "check_requirements", "path": "utils/general.py", "snippet": "def check_requirements(requirements='requirements.txt', exclude=()):\n # Check installed dependencies meet requirements (pass *.txt file or list of packages)\n import pkg_resources as pkg\n prefix = colorstr('red', 'bold', 'requirements:')\n if isinstance(requirements, (str, Path)): # requirements.txt file\n file = Path(requirements)\n if not file.exists():\n print(f\"{prefix} {file.resolve()} not found, check failed.\")\n return\n requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude]\n else: # list or tuple of packages\n requirements = [x for x in requirements if x not in exclude]\n\n n = 0 # number of packages updates\n for r in requirements:\n try:\n pkg.require(r)\n except Exception as e: # DistributionNotFound or VersionConflict if requirements not met\n n += 1\n print(f\"{prefix} {e.req} not found and is required by YOLOR, attempting auto-update...\")\n print(subprocess.check_output(f\"pip install '{e.req}'\", shell=True).decode())\n\n if n: # if packages updated\n source = file.resolve() if 'file' in locals() else requirements\n s = f\"{prefix} {n} package{'s' * (n > 1)} updated per {source}\\n\" \\\n f\"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\\n\"\n print(emojis(s)) # emoji-safe" }, { "identifier": "box_iou", "path": "utils/general.py", "snippet": "def box_iou(box1, box2):\n # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n Arguments:\n box1 (Tensor[N, 4])\n box2 (Tensor[M, 4])\n Returns:\n iou (Tensor[N, M]): the NxM matrix 
containing the pairwise\n IoU values for every element in boxes1 and boxes2\n \"\"\"\n\n def box_area(box):\n # box = 4xn\n return (box[2] - box[0]) * (box[3] - box[1])\n\n area1 = box_area(box1.T)\n area2 = box_area(box2.T)\n\n # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)\n inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)\n return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter)" }, { "identifier": "non_max_suppression", "path": "utils/general.py", "snippet": "def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,\n labels=()):\n \"\"\"Runs Non-Maximum Suppression (NMS) on inference results\n\n Returns:\n list of detections, on (n,6) tensor per image [xyxy, conf, cls]\n \"\"\"\n\n nc = prediction.shape[2] - 5 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n\n # Settings\n min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\n max_det = 300 # maximum number of detections per image\n max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()\n time_limit = 10.0 # seconds to quit after\n redundant = True # require redundant detections\n multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)\n merge = False # use merge-NMS\n\n t = time.time()\n output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence\n\n # Cat apriori labels if autolabelling\n if labels and len(labels[xi]):\n l = labels[xi]\n v = torch.zeros((len(l), nc + 5), device=x.device)\n v[:, :4] = l[:, 1:5] # box\n v[:, 4] = 1.0 # conf\n v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls\n x = torch.cat((x, v), 0)\n\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n if nc == 1:\n x[:, 5:] = x[:, 4:5] # for models with one class, cls_loss is 0 and cls_conf is always 0.5,\n # so there is no need to multiplicate.\n else:\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n box = xywh2xyxy(x[:, :4])\n\n # Detections matrix nx6 (xyxy, conf, cls)\n if multi_label:\n i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T\n x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)\n else: # best class only\n conf, j = x[:, 5:].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n\n # Filter by class\n if classes is not None:\n x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\n\n # Apply finite constraint\n # if not torch.isfinite(x).all():\n # x = x[torch.isfinite(x).all(1)]\n\n # Check shape\n n = x.shape[0] # number of boxes\n if not n: # no boxes\n continue\n elif n > max_nms: # excess boxes\n x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence\n\n # Batched NMS\n c = x[:, 5:6] * (0 if agnostic else max_wh) # classes\n boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores\n i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS\n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)\n # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)\n iou = box_iou(boxes[i], boxes) > iou_thres # 
iou matrix\n weights = iou * scores[None] # box weights\n x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes\n if redundant:\n i = i[iou.sum(1) > 1] # require redundancy\n\n output[xi] = x[i]\n if (time.time() - t) > time_limit:\n print(f'WARNING: NMS time limit {time_limit}s exceeded')\n break # time limit exceeded\n\n return output" }, { "identifier": "scale_coords", "path": "utils/general.py", "snippet": "def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):\n # Rescale coords (xyxy) from img1_shape to img0_shape\n if ratio_pad is None: # calculate from img0_shape\n gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new\n pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding\n else:\n gain = ratio_pad[0][0]\n pad = ratio_pad[1]\n\n coords[:, [0, 2]] -= pad[0] # x padding\n coords[:, [1, 3]] -= pad[1] # y padding\n coords[:, :4] /= gain\n clip_coords(coords, img0_shape)\n return coords" }, { "identifier": "xyxy2xywh", "path": "utils/general.py", "snippet": "def xyxy2xywh(x):\n # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center\n y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center\n y[:, 2] = x[:, 2] - x[:, 0] # width\n y[:, 3] = x[:, 3] - x[:, 1] # height\n return y" }, { "identifier": "xywh2xyxy", "path": "utils/general.py", "snippet": "def xywh2xyxy(x):\n # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x\n y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y\n y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x\n y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y\n return y" }, { "identifier": "set_logging", "path": "utils/general.py", "snippet": "def set_logging(rank=-1):\n logging.basicConfig(\n format=\"%(message)s\",\n level=logging.INFO if rank in [-1, 0] else logging.WARN)" }, { "identifier": "increment_path", "path": "utils/general.py", "snippet": "def increment_path(path, exist_ok=True, sep=''):\n # Increment path, i.e. runs/exp --> runs/exp{sep}0, runs/exp{sep}1 etc.\n path = Path(path) # os-agnostic\n if (path.exists() and exist_ok) or (not path.exists()):\n return str(path)\n else:\n dirs = glob.glob(f\"{path}{sep}*\") # similar paths\n matches = [re.search(rf\"%s{sep}(\\d+)\" % path.stem, d) for d in dirs]\n i = [int(m.groups()[0]) for m in matches if m] # indices\n n = max(i) + 1 if i else 2 # increment number\n return f\"{path}{sep}{n}\" # update path" }, { "identifier": "colorstr", "path": "utils/general.py", "snippet": "def colorstr(*input):\n # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. 
colorstr('blue', 'hello world')\n *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string\n colors = {'black': '\\033[30m', # basic colors\n 'red': '\\033[31m',\n 'green': '\\033[32m',\n 'yellow': '\\033[33m',\n 'blue': '\\033[34m',\n 'magenta': '\\033[35m',\n 'cyan': '\\033[36m',\n 'white': '\\033[37m',\n 'bright_black': '\\033[90m', # bright colors\n 'bright_red': '\\033[91m',\n 'bright_green': '\\033[92m',\n 'bright_yellow': '\\033[93m',\n 'bright_blue': '\\033[94m',\n 'bright_magenta': '\\033[95m',\n 'bright_cyan': '\\033[96m',\n 'bright_white': '\\033[97m',\n 'end': '\\033[0m', # misc\n 'bold': '\\033[1m',\n 'underline': '\\033[4m'}\n return ''.join(colors[x] for x in args) + f'{string}' + colors['end']" }, { "identifier": "ap_per_class", "path": "utils/metrics.py", "snippet": "def ap_per_class(tp, conf, pred_cls, target_cls, v5_metric=False, plot=False, save_dir='.', names=()):\n \"\"\" Compute the average precision, given the recall and precision curves.\n Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.\n # Arguments\n tp: True positives (nparray, nx1 or nx10).\n conf: Objectness value from 0-1 (nparray).\n pred_cls: Predicted object classes (nparray).\n target_cls: True object classes (nparray).\n plot: Plot precision-recall curve at [email protected]\n save_dir: Plot save directory\n # Returns\n The average precision as computed in py-faster-rcnn.\n \"\"\"\n # Sort by objectness\n i = np.argsort(-conf)\n tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]\n\n # Find unique classes\n unique_classes = np.unique(target_cls)\n nc = unique_classes.shape[0] # number of classes, number of detections\n\n # Create Precision-Recall curve and compute AP for each class\n px, py = np.linspace(0, 1, 1000), [] # for plotting\n ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000))\n for ci, c in enumerate(unique_classes):\n i = pred_cls == c\n n_l = (target_cls == c).sum() # number of labels\n n_p = i.sum() # number of predictions\n\n if n_p == 0 or n_l == 0:\n continue\n else:\n # Accumulate FPs and TPs\n fpc = (1 - tp[i]).cumsum(0)\n tpc = tp[i].cumsum(0)\n\n # Recall\n recall = tpc / (n_l + 1e-16) # recall curve\n r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases\n\n # Precision\n precision = tpc / (tpc + fpc) # precision curve\n p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score\n\n # AP from recall-precision curve\n for j in range(tp.shape[1]):\n ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j], v5_metric=v5_metric)\n if plot and j == 0:\n py.append(np.interp(px, mrec, mpre)) # precision at [email protected]\n\n # Compute F1 (harmonic mean of precision and recall)\n f1 = 2 * p * r / (p + r + 1e-16)\n if plot:\n plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names)\n plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1')\n plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision')\n plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall')\n\n i = f1.mean(0).argmax() # max F1 index\n return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32')" }, { "identifier": "ConfusionMatrix", "path": "utils/metrics.py", "snippet": "class ConfusionMatrix:\n # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix\n def __init__(self, nc, conf=0.25, iou_thres=0.45):\n self.matrix = np.zeros((nc + 1, nc + 1))\n self.nc = nc # number of 
classes\n self.conf = conf\n self.iou_thres = iou_thres\n\n def process_batch(self, detections, labels):\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n Arguments:\n detections (Array[N, 6]), x1, y1, x2, y2, conf, class\n labels (Array[M, 5]), class, x1, y1, x2, y2\n Returns:\n None, updates confusion matrix accordingly\n \"\"\"\n detections = detections[detections[:, 4] > self.conf]\n gt_classes = labels[:, 0].int()\n detection_classes = detections[:, 5].int()\n iou = general.box_iou(labels[:, 1:], detections[:, :4])\n\n x = torch.where(iou > self.iou_thres)\n if x[0].shape[0]:\n matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()\n if x[0].shape[0] > 1:\n matches = matches[matches[:, 2].argsort()[::-1]]\n matches = matches[np.unique(matches[:, 1], return_index=True)[1]]\n matches = matches[matches[:, 2].argsort()[::-1]]\n matches = matches[np.unique(matches[:, 0], return_index=True)[1]]\n else:\n matches = np.zeros((0, 3))\n\n n = matches.shape[0] > 0\n m0, m1, _ = matches.transpose().astype(np.int16)\n for i, gc in enumerate(gt_classes):\n j = m0 == i\n if n and sum(j) == 1:\n self.matrix[gc, detection_classes[m1[j]]] += 1 # correct\n else:\n self.matrix[self.nc, gc] += 1 # background FP\n\n if n:\n for i, dc in enumerate(detection_classes):\n if not any(m1 == i):\n self.matrix[dc, self.nc] += 1 # background FN\n\n def matrix(self):\n return self.matrix\n\n def plot(self, save_dir='', names=()):\n try:\n import seaborn as sn\n\n array = self.matrix / (self.matrix.sum(0).reshape(1, self.nc + 1) + 1E-6) # normalize\n array[array < 0.005] = np.nan # don't annotate (would appear as 0.00)\n\n fig = plt.figure(figsize=(12, 9), tight_layout=True)\n sn.set(font_scale=1.0 if self.nc < 50 else 0.8) # for label size\n labels = (0 < len(names) < 99) and len(names) == self.nc # apply names to ticklabels\n sn.heatmap(array, annot=self.nc < 30, annot_kws={\"size\": 8}, cmap='Blues', fmt='.2f', square=True,\n xticklabels=names + ['background FP'] if labels else \"auto\",\n yticklabels=names + ['background FN'] if labels else \"auto\").set_facecolor((1, 1, 1))\n fig.axes[0].set_xlabel('True')\n fig.axes[0].set_ylabel('Predicted')\n fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)\n except Exception as e:\n pass\n\n def print(self):\n for i in range(self.nc + 1):\n print(' '.join(map(str, self.matrix[i])))" }, { "identifier": "plot_images", "path": "utils/plots.py", "snippet": "def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):\n # Plot image grid with labels\n\n if isinstance(images, torch.Tensor):\n images = images.cpu().float().numpy()\n if isinstance(targets, torch.Tensor):\n targets = targets.cpu().numpy()\n\n # un-normalise\n if np.max(images[0]) <= 1:\n images *= 255\n\n tl = 3 # line thickness\n tf = max(tl - 1, 1) # font thickness\n bs, _, h, w = images.shape # batch size, _, height, width\n bs = min(bs, max_subplots) # limit plot images\n ns = np.ceil(bs ** 0.5) # number of subplots (square)\n\n # Check if we should resize\n scale_factor = max_size / max(h, w)\n if scale_factor < 1:\n h = math.ceil(scale_factor * h)\n w = math.ceil(scale_factor * w)\n\n colors = color_list() # list of colors\n mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init\n for i, img in enumerate(images):\n if i == max_subplots: # if last batch has fewer images than we expect\n break\n\n block_x = int(w * (i 
// ns))\n block_y = int(h * (i % ns))\n\n img = img.transpose(1, 2, 0)\n if scale_factor < 1:\n img = cv2.resize(img, (w, h))\n\n mosaic[block_y:block_y + h, block_x:block_x + w, :] = img\n if len(targets) > 0:\n image_targets = targets[targets[:, 0] == i]\n boxes = xywh2xyxy(image_targets[:, 2:6]).T\n classes = image_targets[:, 1].astype('int')\n labels = image_targets.shape[1] == 6 # labels if no conf column\n conf = None if labels else image_targets[:, 6] # check for confidence presence (label vs pred)\n\n if boxes.shape[1]:\n if boxes.max() <= 1.01: # if normalized with tolerance 0.01\n boxes[[0, 2]] *= w # scale to pixels\n boxes[[1, 3]] *= h\n elif scale_factor < 1: # absolute coords need scale if image scales\n boxes *= scale_factor\n boxes[[0, 2]] += block_x\n boxes[[1, 3]] += block_y\n for j, box in enumerate(boxes.T):\n cls = int(classes[j])\n color = colors[cls % len(colors)]\n cls = names[cls] if names else cls\n if labels or conf[j] > 0.25: # 0.25 conf thresh\n label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j])\n plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)\n\n # Draw image filename labels\n if paths:\n label = Path(paths[i]).name[:40] # trim to 40 char\n t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]\n cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,\n lineType=cv2.LINE_AA)\n\n # Image border\n cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)\n\n if fname:\n r = min(1280. / max(h, w) / ns, 1.0) # ratio to limit image size\n mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA)\n # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB)) # cv2 save\n Image.fromarray(mosaic).save(fname) # PIL save\n return mosaic" }, { "identifier": "output_to_target", "path": "utils/plots.py", "snippet": "def output_to_target(output):\n # Convert model output to target format [batch_id, class_id, x, y, w, h, conf]\n targets = []\n for i, o in enumerate(output):\n for *box, conf, cls in o.cpu().numpy():\n targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf])\n return np.array(targets)" }, { "identifier": "plot_study_txt", "path": "utils/plots.py", "snippet": "def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_txt()\n # Plot study.txt generated by test.py\n fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)\n # ax = ax.ravel()\n\n fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)\n # for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolor-p6', 'yolor-w6', 'yolor-e6', 'yolor-d6']]:\n for f in sorted(Path(path).glob('study*.txt')):\n y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T\n x = np.arange(y.shape[1]) if x is None else np.array(x)\n s = ['P', 'R', '[email protected]', '[email protected]:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)']\n # for i in range(7):\n # ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)\n # ax[i].set_title(s[i])\n\n j = y[3].argmax() + 1\n ax2.plot(y[6, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8,\n label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))\n\n ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],\n 'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')\n\n ax2.grid(alpha=0.2)\n ax2.set_yticks(np.arange(20, 60, 5))\n ax2.set_xlim(0, 57)\n 
ax2.set_ylim(30, 55)\n ax2.set_xlabel('GPU Speed (ms/img)')\n ax2.set_ylabel('COCO AP val')\n ax2.legend(loc='lower right')\n plt.savefig(str(Path(path).name) + '.png', dpi=300)" }, { "identifier": "select_device", "path": "utils/torch_utils.py", "snippet": "def select_device(device='', batch_size=None):\n # device = 'cpu' or '0' or '0,1,2,3'\n s = f'YOLOR 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string\n cpu = device.lower() == 'cpu'\n if cpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable\n assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability\n\n cuda = not cpu and torch.cuda.is_available()\n if cuda:\n n = torch.cuda.device_count()\n if n > 1 and batch_size: # check that batch_size is compatible with device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * len(s)\n for i, d in enumerate(device.split(',') if device else range(n)):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\\n\" # bytes to MB\n else:\n s += 'CPU\\n'\n\n logger.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe\n return torch.device('cuda:0' if cuda else 'cpu')" }, { "identifier": "time_synchronized", "path": "utils/torch_utils.py", "snippet": "def time_synchronized():\n # pytorch-accurate time\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n return time.time()" }, { "identifier": "TracedModel", "path": "utils/torch_utils.py", "snippet": "class TracedModel(nn.Module):\n\n def __init__(self, model=None, device=None, img_size=(640,640)): \n super(TracedModel, self).__init__()\n \n print(\" Convert model to Traced-model... \") \n self.stride = model.stride\n self.names = model.names\n self.model = model\n\n self.model = revert_sync_batchnorm(self.model)\n self.model.to('cpu')\n self.model.eval()\n\n self.detect_layer = self.model.model[-1]\n self.model.traced = True\n \n rand_example = torch.rand(1, 3, img_size, img_size)\n \n traced_script_module = torch.jit.trace(self.model, rand_example, strict=False)\n #traced_script_module = torch.jit.script(self.model)\n traced_script_module.save(\"traced_model.pt\")\n print(\" traced_script_module saved! \")\n self.model = traced_script_module\n self.model.to(device)\n self.detect_layer.to(device)\n print(\" model is traced! \\n\") \n\n def forward(self, x, augment=False, profile=False):\n out = self.model(x)\n out = self.detect_layer(out)\n return out" } ]
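The `ConfusionMatrix.process_batch` snippet quoted above matches detections to labels through a pairwise IoU matrix (`general.box_iou`). As a rough, self-contained sketch of that IoU computation, assuming (x1, y1, x2, y2) boxes as described in the snippet's docstring (the real `utils.general.box_iou` may differ in details):

```python
import torch

def box_iou_sketch(box1: torch.Tensor, box2: torch.Tensor) -> torch.Tensor:
    """Pairwise IoU for [N, 4] and [M, 4] boxes in (x1, y1, x2, y2) format -> [N, M]."""
    area1 = (box1[:, 2] - box1[:, 0]) * (box1[:, 3] - box1[:, 1])
    area2 = (box2[:, 2] - box2[:, 0]) * (box2[:, 3] - box2[:, 1])
    lt = torch.max(box1[:, None, :2], box2[None, :, :2])  # top-left corner of each intersection
    rb = torch.min(box1[:, None, 2:], box2[None, :, 2:])  # bottom-right corner of each intersection
    wh = (rb - lt).clamp(min=0)                           # zero width/height when boxes are disjoint
    inter = wh[..., 0] * wh[..., 1]
    return inter / (area1[:, None] + area2[None, :] - inter + 1e-16)

labels = torch.tensor([[0., 0., 10., 10.]])                    # one ground-truth box
dets = torch.tensor([[1., 1., 9., 9.], [20., 20., 30., 30.]])  # two detections
print(box_iou_sketch(labels, dets))                            # tensor([[0.64, 0.00]])
```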
import argparse
import json
import os
import numpy as np
import torch
import yaml
from pathlib import Path
from threading import Thread
from tqdm import tqdm
from models.experimental import attempt_load
from utils.datasets import create_dataloader
from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, check_requirements, \
    box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, colorstr
from utils.metrics import ap_per_class, ConfusionMatrix
from utils.plots import plot_images, output_to_target, plot_study_txt
from utils.torch_utils import select_device, time_synchronized, TracedModel
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
11,871
coco91class = coco80_to_coco91_class() s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', '[email protected]', '[email protected]:.95') p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0. loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): img = img.to(device, non_blocking=True) img = img.half() if half else img.float() # uint8 to fp16/32 img /= 255.0 # 0 - 255 to 0.0 - 1.0 targets = targets.to(device) nb, _, height, width = img.shape # batch size, channels, height, width with torch.no_grad(): # Run model t = time_synchronized() out, train_out = model(img, augment=augment) # inference and training outputs t0 += time_synchronized() - t # Compute loss if compute_loss: loss += compute_loss([x.float() for x in train_out], targets)[1][:3] # box, obj, cls # Run NMS targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling t = time_synchronized() out = non_max_suppression(out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb, multi_label=True) t1 += time_synchronized() - t # Statistics per image for si, pred in enumerate(out): labels = targets[targets[:, 0] == si, 1:] nl = len(labels) tcls = labels[:, 0].tolist() if nl else [] # target class path = Path(paths[si]) seen += 1 if len(pred) == 0: if nl: stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls)) continue # Predictions predn = pred.clone() scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred # Append to text file if save_txt: gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') # W&B logging - Media Panel Plots if len(wandb_images) < log_imgs and wandb_logger.current_epoch > 0: # Check for test operation if wandb_logger.current_epoch % wandb_logger.bbox_interval == 0: box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, "class_id": int(cls), "box_caption": "%s %.3f" % (names[cls], conf), "scores": {"class_score": conf}, "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space wandb_images.append(wandb_logger.wandb.Image(img[si], boxes=boxes, caption=path.name)) wandb_logger.log_training_progress(predn, path, names) if wandb_logger and wandb_logger.wandb_run else None # Append to pycocotools JSON dictionary if save_json: # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ... 
image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner for p, b in zip(pred.tolist(), box.tolist()): jdict.append({'image_id': image_id, 'category_id': coco91class[int(p[5])] if is_coco else int(p[5]), 'bbox': [round(x, 3) for x in b], 'score': round(p[4], 5)}) # Assign all predictions as incorrect correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device) if nl: detected = [] # target indices tcls_tensor = labels[:, 0] # target boxes tbox = xywh2xyxy(labels[:, 1:5]) scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels if plots: confusion_matrix.process_batch(predn, torch.cat((labels[:, 0:1], tbox), 1)) # Per target class for cls in torch.unique(tcls_tensor): ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # prediction indices pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # target indices # Search for detections if pi.shape[0]: # Prediction to target ious ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1) # best ious, indices # Append detections detected_set = set() for j in (ious > iouv[0]).nonzero(as_tuple=False): d = ti[i[j]] # detected target if d.item() not in detected_set: detected_set.add(d.item()) detected.append(d) correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn if len(detected) == nl: # all targets already located in image break # Append statistics (correct, conf, pcls, tcls) stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) # Plot images if plots and batch_i < 3: f = save_dir / f'test_batch{batch_i}_labels.jpg' # labels Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start() f = save_dir / f'test_batch{batch_i}_pred.jpg' # predictions
def test(data, weights=None, batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, # for NMS save_json=False, single_cls=False, augment=False, verbose=False, model=None, dataloader=None, save_dir=Path(''), # for saving images save_txt=False, # for auto-labelling save_hybrid=False, # for hybrid auto-labelling save_conf=False, # save auto-label confidences plots=True, wandb_logger=None, compute_loss=None, half_precision=True, trace=False, is_coco=False, v5_metric=False): # Initialize/load model and set device training = model is not None if training: # called by train.py device = next(model.parameters()).device # get model device else: # called directly set_logging() device = select_device(opt.device, batch_size=batch_size) # Directories save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = attempt_load(weights, map_location=device) # load FP32 model gs = max(int(model.stride.max()), 32) # grid size (max stride) imgsz = check_img_size(imgsz, s=gs) # check img_size if trace: model = TracedModel(model, device, imgsz) # Half half = device.type != 'cpu' and half_precision # half precision only supported on CUDA if half: model.half() # Configure model.eval() if isinstance(data, str): is_coco = data.endswith('coco.yaml') with open(data) as f: data = yaml.load(f, Loader=yaml.SafeLoader) check_dataset(data) # check nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for [email protected]:0.95 niou = iouv.numel() # Logging log_imgs = 0 if wandb_logger and wandb_logger.wandb: log_imgs = min(wandb_logger.log_imgs, 100) # Dataloader if not training: if device.type != 'cpu': model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once task = opt.task if opt.task in ('train', 'val', 'test') else 'val' # path to train/val/test images dataloader = create_dataloader(data[task], imgsz, batch_size, gs, opt, pad=0.5, rect=True, prefix=colorstr(f'{task}: '))[0] if v5_metric: print("Testing with YOLOv5 AP metric...") seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} coco91class = coco80_to_coco91_class() s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', '[email protected]', '[email protected]:.95') p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0. 
loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): img = img.to(device, non_blocking=True) img = img.half() if half else img.float() # uint8 to fp16/32 img /= 255.0 # 0 - 255 to 0.0 - 1.0 targets = targets.to(device) nb, _, height, width = img.shape # batch size, channels, height, width with torch.no_grad(): # Run model t = time_synchronized() out, train_out = model(img, augment=augment) # inference and training outputs t0 += time_synchronized() - t # Compute loss if compute_loss: loss += compute_loss([x.float() for x in train_out], targets)[1][:3] # box, obj, cls # Run NMS targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling t = time_synchronized() out = non_max_suppression(out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb, multi_label=True) t1 += time_synchronized() - t # Statistics per image for si, pred in enumerate(out): labels = targets[targets[:, 0] == si, 1:] nl = len(labels) tcls = labels[:, 0].tolist() if nl else [] # target class path = Path(paths[si]) seen += 1 if len(pred) == 0: if nl: stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls)) continue # Predictions predn = pred.clone() scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred # Append to text file if save_txt: gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') # W&B logging - Media Panel Plots if len(wandb_images) < log_imgs and wandb_logger.current_epoch > 0: # Check for test operation if wandb_logger.current_epoch % wandb_logger.bbox_interval == 0: box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, "class_id": int(cls), "box_caption": "%s %.3f" % (names[cls], conf), "scores": {"class_score": conf}, "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space wandb_images.append(wandb_logger.wandb.Image(img[si], boxes=boxes, caption=path.name)) wandb_logger.log_training_progress(predn, path, names) if wandb_logger and wandb_logger.wandb_run else None # Append to pycocotools JSON dictionary if save_json: # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ... 
image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner for p, b in zip(pred.tolist(), box.tolist()): jdict.append({'image_id': image_id, 'category_id': coco91class[int(p[5])] if is_coco else int(p[5]), 'bbox': [round(x, 3) for x in b], 'score': round(p[4], 5)}) # Assign all predictions as incorrect correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device) if nl: detected = [] # target indices tcls_tensor = labels[:, 0] # target boxes tbox = xywh2xyxy(labels[:, 1:5]) scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels if plots: confusion_matrix.process_batch(predn, torch.cat((labels[:, 0:1], tbox), 1)) # Per target class for cls in torch.unique(tcls_tensor): ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # prediction indices pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # target indices # Search for detections if pi.shape[0]: # Prediction to target ious ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1) # best ious, indices # Append detections detected_set = set() for j in (ious > iouv[0]).nonzero(as_tuple=False): d = ti[i[j]] # detected target if d.item() not in detected_set: detected_set.add(d.item()) detected.append(d) correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn if len(detected) == nl: # all targets already located in image break # Append statistics (correct, conf, pcls, tcls) stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) # Plot images if plots and batch_i < 3: f = save_dir / f'test_batch{batch_i}_labels.jpg' # labels Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start() f = save_dir / f'test_batch{batch_i}_pred.jpg' # predictions
Thread(target=plot_images, args=(img, output_to_target(out), paths, f, names), daemon=True).start()
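That `plot_images` call receives targets produced by the quoted `output_to_target`, i.e. rows of the form [batch_id, class_id, x, y, w, h, conf]. A minimal sketch of that conversion for a single NMS detection, with a stand-in for `utils.general.xyxy2xywh` and made-up values:

```python
def xyxy2xywh_sketch(box):
    # corners (x1, y1, x2, y2) -> center/size (xc, yc, w, h)
    x1, y1, x2, y2 = box
    return [(x1 + x2) / 2, (y1 + y2) / 2, x2 - x1, y2 - y1]

detection = [50.0, 30.0, 150.0, 130.0, 0.91, 2.0]  # x1, y1, x2, y2, conf, cls (one NMS output row)
batch_id = 0
*xyxy, conf, cls = detection
target_row = [batch_id, cls, *xyxy2xywh_sketch(xyxy), conf]
print(target_row)  # [0, 2.0, 100.0, 80.0, 100.0, 100.0, 0.91]
```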
18
2023-10-08 13:05:58+00:00
16k
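In the evaluation loop above, each matched detection's best IoU is compared against the whole `iouv` threshold vector at once (`correct[pi[j]] = ious[j] > iouv`), which is what later produces both [email protected] and [email protected]:.95. A tiny illustration of that comparison; the 0.72 IoU value is made up:

```python
import torch

iouv = torch.linspace(0.5, 0.95, 10)  # the ten AP thresholds used above
best_iou = torch.tensor(0.72)         # hypothetical best IoU between a detection and its target

correct_row = best_iou > iouv         # one boolean per threshold
print(correct_row)                    # True for 0.50-0.70, False for 0.75-0.95
```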
falesiani/torch_ga
torch_ga/layers.py
[ { "identifier": "BladeKind", "path": "torch_ga/blades.py", "snippet": "class BladeKind(Enum):\n \"\"\"Kind of blade depending on its degree.\"\"\"\n MV = \"mv\"\n EVEN = \"even\"\n ODD = \"odd\"\n SCALAR = \"scalar\"\n VECTOR = \"vector\"\n BIVECTOR = \"bivector\"\n TRIVECTOR = \"trivector\"\n PSEUDOSCALAR = \"pseudoscalar\"\n PSEUDOVECTOR = \"pseudovector\"\n PSEUDOBIVECTOR = \"pseudobivector\"\n PSEUDOTRIVECTOR = \"pseudotrivector\"" }, { "identifier": "GeometricAlgebra", "path": "torch_ga/torch_ga.py", "snippet": "class GeometricAlgebra:\n \"\"\"Class used for performing geometric algebra operations on `torch.Tensor` instances.\n Exposes methods for operating on `torch.Tensor` instances where their last\n axis is interpreted as blades of the algebra.\n Holds the metric and other quantities derived from it.\n \"\"\"\n\n def __init__(self, metric: List[float]):\n \"\"\"Creates a GeometricAlgebra object given a metric.\n The algebra will have as many basis vectors as there are\n elements in the metric.\n\n Args:\n metric: Metric as a list. Specifies what basis vectors square to\n \"\"\"\n self._metric = torch.tensor(metric, dtype=torch.float32)\n\n self._num_bases = len(metric)\n self._bases = list(map(str, range(self._num_bases)))\n\n self._blades, self._blade_degrees = blades_from_bases(self._bases)\n self._blade_degrees = torch.tensor(self._blade_degrees)\n self._num_blades = len(self._blades)\n self._max_degree = self._blade_degrees.max()\n\n # [Blades, Blades, Blades]\n _list = get_cayley_tensor(self.metric, self._bases, self._blades)\n # print(_list)\n if type(_list) in [list,tuple]:\n _list = np.array(_list)\n self._cayley, self._cayley_inner, self._cayley_outer = torch.tensor(\n _list,\n dtype=torch.float32\n )\n\n self._blade_mvs = torch.eye(self._num_blades)\n self._basis_mvs = self._blade_mvs[1:1+self._num_bases]\n\n # Find the dual by looking at the anti-diagonal in the Cayley tensor.\n self._dual_blade_indices = []\n self._dual_blade_signs = []\n\n for blade_index in range(self._num_blades):\n dual_index = self.num_blades - blade_index - 1\n anti_diag = self._cayley[blade_index, dual_index]\n # dual_sign = tf.gather(anti_diag, tf.where(\n # anti_diag != 0.0)[..., 0])[..., 0]\n dual_sign = anti_diag[torch.where(anti_diag != 0.0)]\n\n self._dual_blade_indices.append(dual_index)\n self._dual_blade_signs.append(dual_sign)\n\n self._dual_blade_indices = torch.tensor(\n self._dual_blade_indices, dtype=torch.int64)\n self._dual_blade_signs = torch.tensor(\n self._dual_blade_signs, dtype=torch.float32)\n\n def print(self, *args, **kwargs):\n \"\"\"Same as the default `print` function but formats `torch.Tensor`\n instances that have as many elements on their last axis\n as the algebra has blades using `mv_repr()`.\n \"\"\"\n def _is_mv(arg):\n return isinstance(arg, torch.Tensor) and len(arg.shape) > 0 and arg.shape[-1] == self.num_blades\n new_args = [self.mv_repr(arg) if _is_mv(arg) else arg for arg in args]\n\n print(*new_args, **kwargs)\n\n @property\n def metric(self) -> torch.Tensor:\n \"\"\"Metric list which contains the number that each\n basis vector in the algebra squares to\n (ie. the diagonal of the metric tensor).\n \"\"\"\n return self._metric\n\n @property\n def cayley(self) -> torch.Tensor:\n \"\"\"`MxMxM` tensor where `M` is the number of basis\n blades in the algebra. 
Used for calculating the\n geometric product:\n\n `a_i, b_j, cayley_ijk -> c_k`\n \"\"\"\n return self._cayley\n\n @property\n def cayley_inner(self) -> torch.Tensor:\n \"\"\"Analagous to cayley but for inner product.\"\"\"\n return self._cayley_inner\n\n @property\n def cayley_outer(self) -> torch.Tensor:\n \"\"\"Analagous to cayley but for outer product.\"\"\"\n return self._cayley_outer\n\n @property\n def blades(self) -> List[str]:\n \"\"\"List of all blade names.\n\n Blades are all possible independent combinations of\n basis vectors. Basis vectors are named starting\n from `\"0\"` and counting up. The scalar blade is the\n empty string `\"\"`.\n\n Example\n - Bases: `[\"0\", \"1\", \"2\"]`\n - Blades: `[\"\", \"0\", \"1\", \"2\", \"01\", \"02\", \"12\", \"012\"]`\n \"\"\"\n return self._blades\n\n @property\n def blade_mvs(self) -> torch.Tensor:\n \"\"\"List of all blade tensors in the algebra.\"\"\"\n return self._blade_mvs\n\n @property\n def dual_blade_indices(self) -> torch.Tensor:\n \"\"\"Indices of the dual blades for each blade.\"\"\"\n return self._dual_blade_indices\n\n @property\n def dual_blade_signs(self) -> torch.Tensor:\n \"\"\"Signs of the dual blades for each blade.\"\"\"\n return self._dual_blade_signs\n\n @property\n def num_blades(self) -> int:\n \"\"\"Total number of blades in the algebra.\"\"\"\n return self._num_blades\n\n @property\n def blade_degrees(self) -> torch.Tensor:\n \"\"\"List of blade-degree for each blade in the algebra.\"\"\"\n return self._blade_degrees\n\n @property\n def max_degree(self) -> int:\n \"\"\"Highest blade degree in the algebra.\"\"\"\n return self._max_degree\n\n @property\n def basis_mvs(self) -> torch.Tensor:\n \"\"\"List of basis vectors as torch.Tensor.\"\"\"\n return self._basis_mvs\n\n def get_kind_blade_indices(self, kind: BladeKind, invert: bool = False) -> torch.Tensor:\n \"\"\"Find all indices of blades of a given kind in the algebra.\n\n Args:\n kind: kind of blade to give indices for\n invert: whether to return all blades not of the kind\n\n Returns:\n indices of blades of a given kind in the algebra\n \"\"\"\n return get_blade_of_kind_indices(self.blade_degrees, kind, self.max_degree, invert=invert)\n\n def get_blade_indices_of_degree(self, degree: int) -> torch.Tensor:\n \"\"\"Find all indices of blades of the given degree.\n\n Args:\n degree: degree to return blades for\n\n Returns:\n indices of blades with the given degree in the algebra\n \"\"\"\n # return tf.gather(tf.range(self.num_blades), tf.where(self.blade_degrees == degree)[..., 0])\n return torch.range(self.num_blades)[torch.where(self.blade_degrees == degree)[..., 0]]\n\n def is_pure(self, tensor: torch.Tensor, blade_indices: torch.Tensor) -> bool:\n \"\"\"Returns whether the given tensor is purely of the given blades\n and has no non-zero values for blades not in the given blades.\n\n Args:\n tensor: tensor to check purity for\n blade_indices: blade indices to check purity for\n\n Returns:\n Whether the tensor is purely of the given blades\n and has no non-zero values for blades not in the given blades\n \"\"\"\n # tensor = torch.tensor(tensor, dtype=torch.float32)\n tensor = tensor.to(dtype=torch.float32)\n if not type(blade_indices) in [torch.Tensor]:\n blade_indices = torch.tensor(blade_indices)\n \n blade_indices = blade_indices.to(dtype=torch.int64)\n\n # blade_indices = torch.tensor(\n # blade_indices, dtype=torch.int64)\n\n inverted_blade_indices = invert_blade_indices(\n self.num_blades, blade_indices)\n\n # return tf.reduce_all(tf.gather(\n # 
tensor,\n # inverted_blade_indices,\n # axis=-1\n # ) == 0)\n return (tensor[inverted_blade_indices]==0).sum(dim=-1)\n\n def is_pure_kind(self, tensor: torch.Tensor, kind: BladeKind) -> bool:\n \"\"\"Returns whether the given tensor is purely of a given kind\n and has no non-zero values for blades not of the kind.\n\n Args:\n tensor: tensor to check purity for\n kind: kind of blade to check purity for\n\n Returns:\n Whether the tensor is purely of a given kind\n and has no non-zero values for blades not of the kind\n \"\"\"\n # tensor = torch.tensor(tensor, dtype=torch.float32)\n tensor = tensor.to(dtype=torch.float32)\n inverted_kind_indices = self.get_kind_blade_indices(kind, invert=True)\n # print(f\"tensor={tensor}\")\n # print(f\"kind={kind}\")\n # print(f\"inverted_kind_indices={inverted_kind_indices.T}\")\n # print(f\"inverted_kind_indices.shape={inverted_kind_indices.shape}\")\n # print(f\"tensor[inverted_kind_indices]={tensor[inverted_kind_indices].T}\")\n # print(f\"tensor[inverted_kind_indices].shape={tensor[inverted_kind_indices].shape}\")\n # print(f\"tensor[inverted_kind_indices]==0={tensor[inverted_kind_indices].T==0}\")\n\n # return tf.reduce_all(tf.gather(\n # tensor,\n # inverted_kind_indices,\n # axis=-1\n # ) == 0)\n return (tensor[inverted_kind_indices]==0).sum(dim=-1)\n\n # def from_tensor(self, tensor: torch.Tensor, blade_indices: torch.Tensor) -> torch.Tensor:\n # \"\"\"Creates a geometric algebra torch.Tensor from a torch.Tensor and blade\n # indices. The blade indices have to align with the last axis of the\n # tensor.\n\n # Args:\n # tensor: torch.Tensor to take as values for the geometric algebra tensor\n # blade_indices: Blade indices corresponding to the tensor. Can\n # be obtained from blade names eg. using get_kind_blade_indices()\n # or as indices from the blades list property.\n\n # Returns:\n # Geometric algebra torch.Tensor from tensor and blade indices\n # \"\"\"\n # blade_indices = torch.tensor(blade_indices, dtype=torch.int64).to(dtype=torch.int64)\n # tensor = torch.tensor(tensor, dtype=torch.float32)\n # # print(f\"blade_indices={blade_indices}\")\n # # print(f\"tensor={tensor}\")\n \n # _shape = tensor.shape\n # is_scalar = False\n # if len(_shape)==1 :\n # _shape_final = [1]+ [self.num_blades] \n # is_scalar = True\n # else:\n # _shape_final = list(_shape[:-1]) + [self.num_blades] \n # b = torch.zeros(_shape_final)\n \n\n # # i = blade_indices.view([-1,1])\n # # v = tensor.flatten().view([-1,1])\n # i = blade_indices.nonzero().flatten()\n # v = tensor.flatten().unsqueeze(1)\n # b = b.view([-1,self.num_blades])\n # # b[:,i] = v\n # try:\n # b[:,i] = v\n # except:\n # print(f\"_shape={_shape},_shape_final={_shape_final}\")\n # print(f\"i.shape={i.shape},v.shape={v.shape},b.shape={b.shape}\")\n # print(f\"i={i},v={v},b={b}\")\n # raise\n # # raise \"whatever\"\n # b = b.reshape(_shape_final)\n\n # # _shape_tmp = list(v.shape) + [self.num_blades] \n # # print(f\"i,v,_shape_tmp,_shape_final={i},{v},{_shape_tmp},{_shape_final},i.shape={i.shape}\")\n # # b = torch.sparse_coo_tensor(i, v, size=_shape_tmp)\n # # print(f\"b={b}\")\n # # b = torch.sparse_coo_tensor(i, v, size=_shape_tmp).to_dense()\n # # b = b.reshape(_shape_final)\n # if is_scalar:\n # b=b.unsqueeze(0)\n # return b\n\n # # # Put last axis on first axis so scatter_nd becomes easier.\n # # # Later undo the transposition again.\n # # # t = tf.concat([[tensor.shape.ndims - 1],\n # # # tf.range(0, tensor.shape.ndims - 1)], axis=0)\n # # # t_inv = tf.concat([tf.range(1, tensor.shape.ndims), [0]], 
axis=0)\n\n # # # tensor = tf.transpose(tensor, t)\n\n # # # shape = tf.concat([\n # # # torch.tensor([self.num_blades], dtype=torch.int64),\n # # # tf.shape(tensor, torch.int64)[1:]\n # # # ], axis=0)\n\n # # # tensor = tf.scatter_nd(\n # # # tf.expand_dims(blade_indices, axis=-1),\n # # # tensor,\n # # # shape\n # # # )\n\n # # # return tf.transpose(tensor, t_inv)\n # # # t = torch.concat([torch.tensor([len(tensor.shape) - 1]), torch.range(0, len(tensor.shape)- 1)], axis=0)\n # # # t_inv = torch.concat([torch.range(1, len(tensor.shape)), torch.tensor([0])], axis=0)\n # # t = [len(tensor.shape) - 1] + list(range(0, len(tensor.shape)- 1))\n # # t_inv = list(range(1, len(tensor.shape))) + [0]\n\n # # tensor = torch.permute(tensor, t)\n\n # # a= torch.tensor([self.num_blades], dtype=torch.int64)\n # # b = torch.tensor(tensor, dtype=torch.int64)[1:]\n # # print(\"a,b:\", a,b, tensor)\n\n\n # # shape = torch.concat([\n # # torch.tensor([self.num_blades], dtype=torch.int64),\n # # torch.tensor(tensor, dtype=torch.int64)[1:]\n # # ], axis=0)\n\n\n # # # tensor = torch.scatter_nd(\n # # # blade_indices.unsqueeze(-1),\n # # # tensor,\n # # # shape\n # # # )\n # # a = torch.zeros(shape)\n # # a[blade_indices] = tensor\n # # tensor = a\n\n # # return torch.permute(tensor, t_inv) \n \n\n def from_tensor(self, tensor: torch.Tensor, blade_indices: torch.Tensor) -> torch.Tensor:\n \"\"\"Creates a geometric algebra torch.Tensor from a torch.Tensor and blade\n indices. The blade indices have to align with the last axis of the\n tensor.\n\n Args:\n tensor: torch.Tensor to take as values for the geometric algebra tensor\n blade_indices: Blade indices corresponding to the tensor. Can\n be obtained from blade names eg. using get_kind_blade_indices()\n or as indices from the blades list property.\n\n Returns:\n Geometric algebra torch.Tensor from tensor and blade indices\n \"\"\"\n # blade_indices = torch.tensor(blade_indices, dtype=torch.int64).to(dtype=torch.int64)\n # tensor = torch.tensor(tensor, dtype=torch.float32)\n blade_indices = blade_indices.to(dtype=torch.int64)\n tensor = tensor.to(dtype=torch.float32)\n # print(f\"blade_indices={blade_indices}\")\n # print(f\"tensor={tensor}\")\n \n _shape = tensor.shape\n is_scalar = False\n if len(_shape)==1 :\n _shape_final = [1]+ [self.num_blades] \n is_scalar = True\n else:\n _shape_final = list(_shape[:-1]) + [self.num_blades] \n b = torch.zeros(_shape_final)\n\n if False:\n print(f\"blade_indices.shape={blade_indices.shape}\")\n print(f\"tensor.shape={tensor.shape}\")\n print(f\"_shape_final={_shape_final}\")\n \n\n\n # i = blade_indices.view([-1,1])\n # v = tensor.flatten().view([-1,1])\n # i = blade_indices.nonzero().flatten()\n i = blade_indices.flatten()\n # v = tensor.flatten().unsqueeze(1)\n v = tensor.view([-1,_shape[-1]])\n b = b.view([-1,self.num_blades])\n if False:\n print(f\"_shape={_shape},_shape_final={_shape_final}\")\n print(f\"i.shape={i.shape},v.shape={v.shape},b.shape={b.shape}\")\n print(f\"i={i},v={v},b={b}\")\n\n # b[:,i] = v\n try:\n b[:,i] = v\n except:\n print(f\"_shape={_shape},_shape_final={_shape_final}\")\n print(f\"i.shape={i.shape},v.shape={v.shape},b.shape={b.shape}\")\n print(f\"i={i},v={v},b={b}\")\n raise\n b = b.reshape(_shape_final)\n\n if False:\n print(f\"b.shape={b.shape}\")\n\n if is_scalar:\n # b=b.unsqueeze(0)\n b=b.squeeze(0)\n return b\n\n\n # # i = blade_indices.view([-1,1])\n # # v = tensor.flatten().view([-1,1])\n # i = blade_indices.nonzero().flatten()\n # v = tensor.flatten().unsqueeze(1)\n # b = 
b.view([-1,self.num_blades])\n # # b[:,i] = v\n # try:\n # b[:,i] = v\n # except:\n # print(f\"_shape={_shape},_shape_final={_shape_final}\")\n # print(f\"i.shape={i.shape},v.shape={v.shape},b.shape={b.shape}\")\n # print(f\"i={i},v={v},b={b}\")\n # raise\n # b = b.reshape(_shape_final)\n\n # if is_scalar:\n # b=b.unsqueeze(0)\n # return b\n\n \n\n def from_tensor_with_kind(self, tensor: torch.Tensor, kind: BladeKind) -> torch.Tensor:\n \"\"\"Creates a geometric algebra torch.Tensor from a torch.Tensor and a kind.\n The kind's blade indices have to align with the last axis of the\n tensor.\n\n Args:\n tensor: torch.Tensor to take as values for the geometric algebra tensor\n kind: Kind corresponding to the tensor\n\n Returns:\n Geometric algebra torch.Tensor from tensor and kind\n \"\"\"\n # Put last axis on first axis so scatter_nd becomes easier.\n # Later undo the transposition again.\n # tensor = torch.tensor(tensor, dtype=torch.float32)\n tensor = tensor.to(dtype=torch.float32)\n kind_indices = self.get_kind_blade_indices(kind)\n if False:\n print(f\"tensor={tensor}\")\n print(f\"kind_indices={kind_indices}\")\n return self.from_tensor(tensor, kind_indices)\n\n def from_scalar(self, scalar: numbers.Number) -> torch.Tensor:\n \"\"\"Creates a geometric algebra torch.Tensor with scalar elements.\n\n Args:\n scalar: Elements to be used as scalars\n\n Returns:\n Geometric algebra torch.Tensor from scalars\n \"\"\"\n # return self.from_tensor_with_kind(tf.expand_dims(scalar, axis=-1), BladeKind.SCALAR)\n # print(\"torch.tensor([scalar]).unsqueeze(-1).shape\",torch.tensor([scalar]).unsqueeze(-1).shape)\n return self.from_tensor_with_kind(torch.tensor([scalar]).unsqueeze(-1), BladeKind.SCALAR).squeeze(0)\n\n def e(self, *blades: List[str]) -> torch.Tensor:\n \"\"\"Returns a geometric algebra torch.Tensor with the given blades set\n to 1.\n\n Args:\n blades: list of blade names, can be unnormalized\n\n Returns:\n torch.Tensor with blades set to 1\n \"\"\"\n blade_signs, blade_indices = get_blade_indices_from_names(\n blades, self.blades)\n\n assert type(blade_indices) in [torch.Tensor], \"should be a tensor\"\n if False: blade_indices = torch.tensor(blade_indices)\n\n # # Don't allow duplicate indices\n # tf.Assert(\n # blade_indices.shape[0] == tf.unique(blade_indices)[0].shape[0],\n # [blades]\n # )\n\n # x = (\n # tf.expand_dims(blade_signs, axis=-1) *\n # tf.gather(self.blade_mvs, blade_indices)\n # )\n\n # # a, b -> b\n # return tf.reduce_sum(x, axis=-2)\n\n # print(f\"blade_indices={blade_indices}\")\n # print(f\"torch.unique(blade_indices)={torch.unique(blade_indices)}\")\n # print(f\"torch.unique(blade_indices)[0]={torch.unique(blade_indices)[0]}\")\n # Don't allow duplicate indices\n # assert(\n # blade_indices.shape[0] == torch.unique(blade_indices).shape[0],\n # [blades]\n # )\n assert blade_indices.shape[0] == torch.unique(blade_indices).shape[0], \"indexes not unique\"\n\n x = blade_signs.unsqueeze(-1) * self.blade_mvs[blade_indices]\n\n # a, b -> b\n return x.sum(dim=-2) \n\n def __getattr__(self, name: str) -> torch.Tensor:\n \"\"\"Returns basis blade tensors if name was a basis.\"\"\"\n if name.startswith(\"e\") and (name[1:] == \"\" or int(name[1:]) >= 0):\n return self.e(name[1:])\n raise AttributeError\n\n def dual(self, tensor: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the dual of the geometric algebra tensor.\n\n Args:\n tensor: Geometric algebra tensor to return dual for\n\n Returns:\n Dual of the geometric algebra tensor\n \"\"\"\n tensor = torch.tensor(tensor, 
dtype=torch.float32)\n # return self.dual_blade_signs * tf.gather(tensor, self.dual_blade_indices, axis=-1)\n return self.dual_blade_signs * tensor[...,self.dual_blade_indices]\n\n def grade_automorphism(self, tensor: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the geometric algebra tensor with odd grades negated.\n See https://en.wikipedia.org/wiki/Paravector#Grade_automorphism.\n\n Args:\n tensor: Geometric algebra tensor to return grade automorphism for\n\n Returns:\n Geometric algebra tensor with odd grades negated\n \"\"\"\n tensor = tensor.to(dtype=torch.float32)\n return mv_grade_automorphism(tensor, self.blade_degrees)\n\n def reversion(self, tensor: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the grade-reversed geometric algebra tensor.\n See https://en.wikipedia.org/wiki/Paravector#Reversion_conjugation.\n\n Args:\n tensor: Geometric algebra tensor to return grade-reversion for\n\n Returns:\n Grade-reversed geometric algebra tensor\n \"\"\"\n tensor = tensor.to(dtype=torch.float32)\n\n return mv_reversion(tensor, self.blade_degrees)\n\n def conjugation(self, tensor: torch.Tensor) -> torch.Tensor:\n \"\"\"Combines reversion and grade automorphism.\n See https://en.wikipedia.org/wiki/Paravector#Clifford_conjugation.\n\n Args:\n tensor: Geometric algebra tensor to return conjugate for\n\n Returns:\n Geometric algebra tensor after `reversion()` and `grade_automorphism()`\n \"\"\"\n tensor = tensor.to(dtype=torch.float32)\n return self.grade_automorphism(self.reversion(tensor))\n\n def simple_inverse(self, a: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the inverted geometric algebra tensor\n `X^-1` such that `X * X^-1 = 1`. Only works for elements that\n square to scalars. Faster than the general inverse.\n\n Args:\n a: Geometric algebra tensor to return inverse for\n\n Returns:\n inverted geometric algebra tensor\n \"\"\"\n a = a.to(dtype=torch.float32)\n\n\n rev_a = self.reversion(a)\n divisor = self.geom_prod(a, rev_a)\n # print(f\"divisor={divisor}\")\n # print(f\"self.is_pure_kind(divisor, BladeKind.SCALAR)={self.is_pure_kind(divisor, BladeKind.SCALAR)}\")\n if not self.is_pure_kind(divisor, BladeKind.SCALAR):\n raise Exception(\n \"Can't invert multi-vector (inversion divisor V ~V not scalar: %s).\" % divisor)\n\n # Divide by scalar part\n return rev_a / divisor[..., :1]\n\n def reg_prod(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the regressive product of two geometric\n algebra tensors.\n\n Args:\n a: Geometric algebra tensor on the left hand side of\n the regressive product\n b: Geometric algebra tensor on the right hand side of\n the regressive product\n\n Returns:\n regressive product of a and b\n \"\"\"\n a = torch.tensor(a, dtype=torch.float32)\n b = torch.tensor(b, dtype=torch.float32)\n\n return self.dual(self.ext_prod(self.dual(a), self.dual(b)))\n\n def ext_prod(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the exterior product of two geometric\n algebra tensors.\n\n Args:\n a: Geometric algebra tensor on the left hand side of\n the exterior product\n b: Geometric algebra tensor on the right hand side of\n the exterior product\n\n Returns:\n exterior product of a and b\n \"\"\"\n a = a.to(dtype=torch.float32)\n b = b.to(dtype=torch.float32)\n\n return mv_multiply(a, b, self._cayley_outer)\n\n def geom_prod(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the geometric product of two geometric\n algebra tensors.\n\n Args:\n a: Geometric algebra tensor on the left hand side of\n the 
geometric product\n b: Geometric algebra tensor on the right hand side of\n the geometric product\n\n Returns:\n geometric product of a and b\n \"\"\"\n # a = torch.tensor(a, dtype=torch.float32)\n # b = torch.tensor(b, dtype=torch.float32)\n\n # a = torch.tensor(a)\n # b = torch.tensor(b)\n\n a = a.to(dtype=torch.float32)\n b = b.to(dtype=torch.float32)\n return mv_multiply(a, b, self._cayley)\n\n \n def element_wise_prod(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the element-wise product of two geometric\n algebra tensors.\n\n Args:\n a: Geometric algebra tensor on the left hand side of\n the geometric product\n b: Geometric algebra tensor on the right hand side of\n the geometric product\n\n Returns:\n geometric product of a and b\n \"\"\"\n # a = torch.tensor(a, dtype=torch.float32)\n # b = torch.tensor(b, dtype=torch.float32)\n\n # a = torch.tensor(a)\n # b = torch.tensor(b)\n\n a = a.to(dtype=torch.float32)\n b = b.to(dtype=torch.float32)\n return mv_multiply_element_wise(a, b, self._cayley)\n\n\n def inner_prod(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the inner product of two geometric\n algebra tensors.\n\n Args:\n a: Geometric algebra tensor on the left hand side of\n the inner product\n b: Geometric algebra tensor on the right hand side of\n the inner product\n\n Returns:\n inner product of a and b\n \"\"\"\n a = a.to(dtype=torch.float32)\n b = b.to(dtype=torch.float32)\n\n return mv_multiply(a, b, self._cayley_inner)\n\n def geom_conv1d(self, a: torch.Tensor, k: torch.Tensor,\n stride: int, padding: str,\n dilations: Union[int, None] = None) -> torch.Tensor:\n \"\"\"Returns the 1D convolution of a sequence with a geometric algebra\n tensor kernel. The convolution is performed using the geometric\n product.\n\n Args:\n a: Input geometric algebra tensor of shape\n [..., Length, ChannelsIn, Blades]\n k: Geometric algebra tensor for the convolution kernel of shape\n [KernelSize, ChannelsIn, ChannelsOut, Blades]\n stride: Stride to use for the convolution\n padding: \"SAME\" (zero-pad input length so output\n length == input length / stride) or \"VALID\" (no padding)\n Returns:\n Geometric algbra tensor of shape\n [..., OutputLength, ChannelsOut, Blades]\n representing `a` convolved with `k`\n \"\"\"\n a = a.to(dtype=torch.float32)\n k = k.to(dtype=torch.float32)\n\n # return mv_conv1d(a, k, self._cayley, stride=stride, padding=padding)\n return f_mv_conv1d(a, k, self._cayley, stride=stride, padding=padding)\n\n def mv_repr(self, a: torch.Tensor) -> str:\n \"\"\"Returns a string representation for the given\n geometric algebra tensor.\n\n Args:\n a: Geometric algebra tensor to return the representation for\n\n Returns:\n string representation for `a`\n \"\"\"\n a = a.to(dtype=torch.float32)\n\n\n if len(a.shape) == 1:\n return \"MultiVector[%s]\" % \" + \".join(\n \"%.2f*%s\" % (value, get_blade_repr(blade_name))\n for value, blade_name\n in zip(a, self.blades)\n if value != 0\n )\n else:\n return f\"MultiVector[batch_shape={a.shape[:-1]}]\"\n\n def approx_exp(self, a: torch.Tensor, order: int = 50) -> torch.Tensor:\n \"\"\"Returns an approximation of the exponential using a centered taylor series.\n\n Args:\n a: Geometric algebra tensor to return exponential for\n order: order of the approximation\n\n Returns:\n Approximation of `exp(a)`\n \"\"\"\n a = a.to(dtype=torch.float32)\n\n v = self.from_scalar(1.0)\n result = self.from_scalar(1.0)\n for i in range(1, order + 1):\n v = self.geom_prod(a, v)\n # i_factorial = 
tf.exp(tf.math.lgamma(i + 1.0))\n i_factorial = torch.exp(torch.lgamma(torch.tensor([i + 1.0])))\n result += v / i_factorial\n return result\n\n def exp(self, a: torch.Tensor, square_scalar_tolerance: Union[float, None] = 1e-4) -> torch.Tensor:\n \"\"\"Returns the exponential of the passed geometric algebra tensor.\n Only works for multivectors that square to scalars.\n\n Args:\n a: Geometric algebra tensor to return exponential for\n square_scalar_tolerance: Tolerance to use for the square scalar check\n or None if the check should be skipped\n\n Returns:\n `exp(a)`\n \"\"\"\n # See https://www.euclideanspace.com/maths/algebra/clifford/algebra/functions/exponent/index.htm\n # for an explanation of how to exponentiate multivectors.\n\n self_sq = self.geom_prod(a, a)\n\n if square_scalar_tolerance is not None:\n # tf.Assert(tf.reduce_all(\n # tf.abs(self_sq[..., 1:]) < square_scalar_tolerance\n # ), [self_sq])\n \n # assert torch.equal(torch.all(self_sq[..., 1:].abs() < square_scalar_tolerance),[self_sq]), \"not sure what\"\n assert torch.all(self_sq[..., 1:].abs() < square_scalar_tolerance), \"square_scalar_tolerance not met\"\n\n scalar_self_sq = self_sq[..., :1]\n\n # \"Complex\" square root (argument can be negative)\n s_sqrt = torch.sign(scalar_self_sq) * torch.sqrt(torch.abs(scalar_self_sq))\n\n # Square to +1: cosh(sqrt(||a||)) + a / sqrt(||a||) sinh(sqrt(||a||))\n # Square to -1: cos(sqrt(||a||)) + a / sqrt(||a||) sin(sqrt(||a||))\n # TODO: Does this work for values other than 1 too? eg. square to +0.5?\n # TODO: Find a solution that doesnt require calculating all possibilities\n # first.\n non_zero_result = torch.where(\n scalar_self_sq < 0,\n (self.from_tensor(torch.cos(s_sqrt), torch.tensor([0])) + a / s_sqrt * torch.sin(s_sqrt)),\n (self.from_tensor(torch.cosh(s_sqrt), torch.tensor([0])) + a / s_sqrt * torch.sinh(s_sqrt))\n )\n\n return torch.where(scalar_self_sq == 0, self.from_scalar(1.0) + a, non_zero_result)\n\n def approx_log(self, a: torch.Tensor, order: int = 50) -> torch.Tensor:\n \"\"\"Returns an approximation of the natural logarithm using a centered\n taylor series. 
Only converges for multivectors where `||mv - 1|| < 1`.\n\n Args:\n a: Geometric algebra tensor to return logarithm for\n order: order of the approximation\n\n Returns:\n Approximation of `log(a)`\n \"\"\"\n a = a.to(dtype=torch.float32)\n\n result = self.from_scalar(0.0)\n\n a_minus_one = a - self.from_scalar(1.0)\n v = None\n\n for i in range(1, order + 1):\n v = a_minus_one if v is None else v * a_minus_one\n result += (((-1.0) ** i) / i) * v\n\n return -result\n\n def int_pow(self, a: torch.Tensor, n: int) -> torch.Tensor:\n \"\"\"Returns the geometric algebra tensor to the power of an integer\n using repeated multiplication.\n\n Args:\n a: Geometric algebra tensor to raise\n n: integer power to raise the multivector to\n\n Returns:\n `a` to the power of `n`\n \"\"\"\n a = a.to(dtype=torch.float32)\n\n\n if not isinstance(n, int):\n raise Exception(\"n must be an integer.\")\n if n < 0:\n raise Exception(\"Can't raise to negative powers.\")\n\n if n == 0:\n # TODO: more efficient (ones only in scalar)\n return torch.ones_like(a) * self.e(\"\")\n\n result = a\n for i in range(n - 1):\n result = self.geom_prod(result, a)\n return result\n\n def keep_blades(self, a: torch.Tensor, blade_indices: List[int]) -> torch.Tensor:\n \"\"\"Takes a geometric algebra tensor and returns it with only the given\n blade_indices as non-zeros.\n\n Args:\n a: Geometric algebra tensor to copy\n blade_indices: Indices for blades to keep\n\n Returns:\n `a` with only `blade_indices` components as non-zeros\n \"\"\"\n a = a.to(dtype=torch.float32)\n blade_indices = blade_indices.to(dtype=torch.int64)\n\n # blade_values = tf.gather(a, blade_indices, axis=-1)\n blade_values = a[...,blade_indices]\n if True: \n b = self.from_tensor(blade_values, blade_indices)\n else:\n blade_mask = torch.zeros(self.num_blades)\n blade_mask[blade_indices] = 1\n b = self.from_tensor(blade_values, blade_mask)\n # print(f\"blade_values, blade_indices, b={blade_values}, {blade_indices}, {b}\")\n # print(f\"blade_mask={blade_mask}\")\n return b\n\n # return self.from_tensor(blade_values, blade_indices)\n\n def keep_blades_with_name(self, a: torch.Tensor, blade_names: Union[List[str], str]) -> torch.Tensor:\n \"\"\"Takes a geometric algebra tensor and returns it with only the given\n blades as non-zeros.\n\n Args:\n a: Geometric algebra tensor to copy\n blade_names: Blades to keep\n\n Returns:\n `a` with only `blade_names` components as non-zeros\n \"\"\"\n if isinstance(blade_names, str):\n blade_names = [blade_names]\n\n _, blade_indices = get_blade_indices_from_names(blade_names, self.blades)\n\n if False:\n print(f\"self.blades={self.blades}\")\n print(f\"blade_names={blade_names}\")\n print(f\"blade_indices={blade_indices}\")\n\n return self.keep_blades(a, blade_indices)\n\n def select_blades(self, a: torch.Tensor, blade_indices: List[int]) -> torch.Tensor:\n \"\"\"Takes a geometric algebra tensor and returns a `torch.Tensor` with the\n blades in blade_indices on the last axis.\n\n\n Args:\n a: Geometric algebra tensor to copy\n blade_indices: Indices for blades to select\n\n Returns:\n `torch.Tensor` based on `a` with `blade_indices` on last axis.\n \"\"\"\n a = a.to(dtype=torch.float32) \n # blade_indices = torch.tensor(blade_indices, dtype=torch.int64).to(dtype=torch.int64)\n blade_indices = blade_indices.to(dtype=torch.int64)\n\n # result = tf.gather(a, blade_indices, axis=-1)\n try:\n if len(a.shape)==1 or a.shape[-1]==a.size().numel():\n result = a.squeeze()[blade_indices]\n else:\n result = a[...,blade_indices]\n except:\n 
print(f\"a={a},blade_indices={blade_indices}\")\n print(f\"a.shape={a.shape},blade_indices.shape={blade_indices.shape},a.size().numel()={a.size().numel()}\")\n raise\n \n return result\n\n def select_blades_with_name(self, a: torch.Tensor, blade_names: Union[List[str], str]) -> torch.Tensor:\n \"\"\"Takes a geometric algebra tensor and returns a `torch.Tensor` with the\n blades in blade_names on the last axis.\n\n\n Args:\n a: Geometric algebra tensor to copy\n blade_names: Blades to keep\n\n Returns:\n `torch.Tensor` based on `a` with `blade_names` on last axis.\n \"\"\"\n a = a.to(dtype=torch.float32)\n\n is_single_blade = isinstance(blade_names, str)\n if is_single_blade:\n blade_names = [blade_names]\n\n blade_signs, blade_indices = get_blade_indices_from_names(\n blade_names, self.blades)\n\n result = blade_signs * self.select_blades(a, blade_indices)\n # if True:\n # print(f\"\")\n\n if is_single_blade:\n return result[..., 0]\n\n return result\n\n def inverse(self, a: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the inverted geometric algebra tensor\n `X^-1` such that `X * X^-1 = 1`.\n\n Using Shirokov's inverse algorithm that works in arbitrary dimensions,\n see https://arxiv.org/abs/2005.04015 Theorem 4.\n\n Args:\n a: Geometric algebra tensor to return inverse for\n\n Returns:\n inverted geometric algebra tensor\n \"\"\"\n # a = torch.tensor(a, dtype=torch.float32)\n a = a.to(dtype=torch.float32)\n if False:\n print(f\"a={a}\")\n\n n = 2 ** ((len(self.metric) + 1) // 2)\n\n # u = a.clone()\n u = a\n for k in range(1, n):\n # c = n / k * self.keep_blades_with_name(u, \"\")\n d = self.keep_blades_with_name(u, \"\")\n c = n / k * d\n u_minus_c = u - c\n if False:\n print(f\"a,d,c,u_minus_c, u = {a},{d},{c},{u_minus_c}, {u}\")\n u = self.geom_prod(a, u_minus_c)\n if False:\n print(f\"u={u}\")\n \n if False:\n print(f\"n={n}\")\n print(f\"a={a}\")\n print(f\"u={u}\")\n if not torch.all(self.is_pure_kind(u, BladeKind.SCALAR)):\n raise Exception(\n \"Can't invert multi-vector (det U not scalar: %s).\" % u)\n\n # adj / det\n return u_minus_c / u[..., :1]\n\n def __call__(self, a: torch.Tensor) -> MultiVector:\n \"\"\"Creates a `MultiVector` from a geometric algebra tensor.\n Mainly used as a wrapper for the algebra's functions for convenience.\n\n Args:\n a: Geometric algebra tensor to return `MultiVector` for\n\n Returns:\n `MultiVector` for `a`\n \"\"\"\n a = a.to(dtype=torch.float32)\n return MultiVector(a, self)\n # return MultiVector(torch.tensor(a), self)" } ]
from typing import List, Union
from .blades import BladeKind
from .torch_ga import GeometricAlgebra
import numpy as np
import torch
import torch.nn as nn
import torch.nn.init as init
10,926
"""Provides Geometric Algebra Keras layers.""" class GeometricAlgebraLayer(nn.Module): def __init__(self, algebra: GeometricAlgebra, **kwargs): self.algebra = algebra self.built = False super().__init__(**kwargs) def build(self): assert(False), "why me?" @classmethod def from_config(cls, config): # Create algebra if necessary (should only occur once, assumes that # config is actually mutable). if "algebra" not in config: assert "metric" in config config["algebra"] = GeometricAlgebra(config["metric"]) del config["metric"] return cls(**config) def get_config(self): # Store metric of the algebra. In from_config() we will recreate the # algebra from the metric. config = super().get_config() config.update({ "metric": self.algebra.metric.numpy() }) return config # @register_keras_serializable(package="TFGA") class TensorToGeometric(GeometricAlgebraLayer): """Layer for converting tensors with given blade indices to geometric algebra tensors. Args: algebra: GeometricAlgebra instance to use blade_indices: blade indices to interpret the last axis of the input tensor as """ def __init__(self, algebra: GeometricAlgebra, blade_indices: List[int], **kwargs): super().__init__(algebra=algebra, **kwargs) self.blade_indices = torch.tensor(blade_indices, dtype=torch.int64) # self.blade_indices = blade_indices.to(dtype=torch.int64) self.built = False def compute_output_shape(self, input_shape): return [*input_shape[:-1], self.algebra.num_blades] def forward(self, inputs): if not self.build: self.build(inputs.shape) return self.algebra.from_tensor(inputs, blade_indices=self.blade_indices) def build(self,input_shape): self.built = True def get_config(self): config = super().get_config() config.update({ "blade_indices": self.blade_indices.numpy() }) return config # @register_keras_serializable(package="TFGA") class TensorWithKindToGeometric(GeometricAlgebraLayer): """Layer for converting tensors with given blade kind to geometric algebra tensors. Args: algebra: GeometricAlgebra instance to use kind: blade kind indices to interpret the last axis of the input tensor as """
"""Provides Geometric Algebra Keras layers.""" class GeometricAlgebraLayer(nn.Module): def __init__(self, algebra: GeometricAlgebra, **kwargs): self.algebra = algebra self.built = False super().__init__(**kwargs) def build(self): assert(False), "why me?" @classmethod def from_config(cls, config): # Create algebra if necessary (should only occur once, assumes that # config is actually mutable). if "algebra" not in config: assert "metric" in config config["algebra"] = GeometricAlgebra(config["metric"]) del config["metric"] return cls(**config) def get_config(self): # Store metric of the algebra. In from_config() we will recreate the # algebra from the metric. config = super().get_config() config.update({ "metric": self.algebra.metric.numpy() }) return config # @register_keras_serializable(package="TFGA") class TensorToGeometric(GeometricAlgebraLayer): """Layer for converting tensors with given blade indices to geometric algebra tensors. Args: algebra: GeometricAlgebra instance to use blade_indices: blade indices to interpret the last axis of the input tensor as """ def __init__(self, algebra: GeometricAlgebra, blade_indices: List[int], **kwargs): super().__init__(algebra=algebra, **kwargs) self.blade_indices = torch.tensor(blade_indices, dtype=torch.int64) # self.blade_indices = blade_indices.to(dtype=torch.int64) self.built = False def compute_output_shape(self, input_shape): return [*input_shape[:-1], self.algebra.num_blades] def forward(self, inputs): if not self.build: self.build(inputs.shape) return self.algebra.from_tensor(inputs, blade_indices=self.blade_indices) def build(self,input_shape): self.built = True def get_config(self): config = super().get_config() config.update({ "blade_indices": self.blade_indices.numpy() }) return config # @register_keras_serializable(package="TFGA") class TensorWithKindToGeometric(GeometricAlgebraLayer): """Layer for converting tensors with given blade kind to geometric algebra tensors. Args: algebra: GeometricAlgebra instance to use kind: blade kind indices to interpret the last axis of the input tensor as """
def __init__(self, algebra: GeometricAlgebra, kind: BladeKind,
0
2023-10-07 13:34:07+00:00
16k
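For orientation, a record like the one above is typically consumed by building a prompt from `import_statement` plus `cropped_code` (optionally prefixed with the retrieved `context` snippets) and scoring the model's completion against `next_line`; `gold_snippet_index` appears to point at the context entry holding the needed definition. The sketch below is an assumption about that protocol, not something stated in the dataset itself.

```python
from typing import Dict, List

def build_completion_prompt(record: Dict, use_context: bool = True) -> str:
    # Field usage is inferred from the field names; adjust if the dataset's
    # documented evaluation protocol differs.
    parts: List[str] = []
    if use_context:
        for snippet in record.get("context", []):
            parts.append(f"# {snippet['path']}\n{snippet['snippet']}")
    parts.append(record["import_statement"])
    parts.append(record["cropped_code"])
    return "\n\n".join(parts)

# The model's output is then compared against record["next_line"].
```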
Significant-Gravitas/autostandup
bot.py
[ { "identifier": "StreaksDB", "path": "streaks/streaks_db.py", "snippet": "class StreaksDB(BaseDB):\n \"\"\"\n StreaksDB class handles all operations related to the 'streaks' table.\n Inherits from the BaseDB class.\n \"\"\"\n\n def __init__(self, host, user, password, database, port):\n \"\"\"\n Initializes the StreaksDB class and creates the 'streaks' table if it doesn't exist.\n\n :param host: The MySQL host address.\n :param user: The MySQL user.\n :param password: The MySQL password.\n :param database: The MySQL database name.\n :param port: The MySQL port number.\n \"\"\"\n super().__init__(host, user, password, database, port)\n self._create_streaks_table()\n\n def _create_streaks_table(self):\n \"\"\"\n Creates the 'streaks' table if it doesn't already exist.\n \"\"\"\n query = '''\n CREATE TABLE IF NOT EXISTS streaks (\n discord_id BIGINT PRIMARY KEY,\n current_streak INT DEFAULT 0,\n FOREIGN KEY (discord_id) REFERENCES team_members(discord_id) ON DELETE CASCADE\n );\n '''\n try:\n self.execute_query(query)\n finally:\n self.close()\n\n def update_streak(self, discord_id: int, new_streak: int):\n \"\"\"\n Updates the streak for a given user.\n\n :param discord_id: The Discord ID of the user.\n :param new_streak: The new streak count.\n \"\"\"\n query = \"\"\"\n INSERT INTO streaks (discord_id, current_streak)\n VALUES (%s, %s)\n ON DUPLICATE KEY UPDATE current_streak = %s\n \"\"\"\n params = (discord_id, new_streak, new_streak)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n\n def get_streak(self, discord_id: int) -> int:\n \"\"\"\n Fetches the current streak for a given user.\n\n :param discord_id: The Discord ID of the user.\n :return: The current streak count.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n c = self.conn.cursor()\n query = \"SELECT current_streak FROM streaks WHERE discord_id = %s\"\n params = (discord_id,)\n try:\n c.execute(query, params)\n row = c.fetchone()\n return row[0] if row else 0\n finally:\n c.close()\n self.close()" }, { "identifier": "TeamMemberDB", "path": "team_members/team_member_db.py", "snippet": "class TeamMemberDB(BaseDB):\n \"\"\"\n TeamMemberDB class handles operations related to the 'team_members' table.\n\n :param host: The MySQL host address.\n :param user: The MySQL user.\n :param password: The MySQL password.\n :param database: The MySQL database name.\n :param port: The MySQL port number.\n \"\"\"\n\n def __init__(self, host: str, user: str, password: str, database: str, port: str):\n \"\"\"\n Initializes the TeamMemberDB class and creates the 'team_members' table if it doesn't exist.\n \"\"\"\n super().__init__(host, user, password, database, port)\n self._create_team_members_table()\n\n def _create_team_members_table(self):\n \"\"\"\n Creates the 'team_members' table if it doesn't already exist.\n \"\"\"\n query = '''\n CREATE TABLE IF NOT EXISTS team_members (\n discord_id BIGINT PRIMARY KEY,\n name VARCHAR(255) NOT NULL,\n time_zone VARCHAR(50) NOT NULL,\n github_username VARCHAR(255),\n on_vacation BOOLEAN DEFAULT FALSE\n );\n '''\n try:\n self.execute_query(query)\n finally:\n self.close()\n\n def insert_new_member(self, discord_id: int, name: str, time_zone: str, github_username: str):\n \"\"\"\n Inserts a new team member into the 'team_members' table.\n\n :param discord_id: The Discord ID of the team member.\n :param name: The name of the team member.\n :param time_zone: The time zone of the team member.\n :param github_username: The GitHub username of 
the team member.\n \"\"\"\n query = \"\"\"\n INSERT INTO team_members (discord_id, name, time_zone, github_username)\n VALUES (%s, %s, %s, %s)\n ON DUPLICATE KEY UPDATE name = %s, time_zone = %s, github_username = %s\n \"\"\"\n params = (discord_id, name, time_zone, github_username, name, time_zone, github_username)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n\n def remove_member(self, discord_id: int):\n \"\"\"\n Removes a team member from the 'team_members' table.\n\n :param discord_id: The Discord ID of the team member to remove.\n \"\"\"\n query = \"DELETE FROM team_members WHERE discord_id = %s\"\n params = (discord_id,)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n\n def list_all_members(self) -> List[Tuple[int, str, str, str, bool]]:\n \"\"\"\n Fetches all team members from the 'team_members' table.\n\n :return: A list of tuples, each containing the Discord ID, name, time zone, GitHub username, and vacation status of a team member.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n c = self.conn.cursor()\n try:\n c.execute(\"SELECT discord_id, name, time_zone, github_username, on_vacation FROM team_members\")\n return c.fetchall()\n finally:\n c.close()\n self.close()\n\n def update_member_timezone(self, discord_id: int, new_time_zone: str):\n \"\"\"\n Updates the timezone of a team member in the 'team_members' table.\n\n :param discord_id: The Discord ID of the team member.\n :param new_time_zone: The new timezone to be set for the team member.\n \"\"\"\n query = \"UPDATE team_members SET time_zone = %s WHERE discord_id = %s\"\n params = (new_time_zone, discord_id)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n\n def set_vacation_status(self, discord_id: int, on_vacation: bool):\n \"\"\"\n Sets the vacation status of a team member in the 'team_members' table.\n\n :param discord_id: The Discord ID of the team member.\n :param on_vacation: The vacation status to be set for the team member.\n \"\"\"\n query = \"UPDATE team_members SET on_vacation = %s WHERE discord_id = %s\"\n params = (on_vacation, discord_id)\n try:\n self.execute_query(query, params)\n finally:\n self.close()" }, { "identifier": "UpdatesDB", "path": "updates/updates_db.py", "snippet": "class UpdatesDB(BaseDB):\n \"\"\"\n Database class for handling operations related to the 'updates' table.\n \"\"\"\n\n def __init__(self, host: str, user: str, password: str, database: str, port: str):\n \"\"\"\n Initializes the UpdatesDB class and creates the 'updates' table if it doesn't exist.\n\n :param host: The MySQL host address.\n :param user: The MySQL user.\n :param password: The MySQL password.\n :param database: The MySQL database name.\n :param port: The MySQL port number.\n \"\"\"\n super().__init__(host, user, password, database, port)\n self._create_updates_table()\n\n def _create_updates_table(self):\n \"\"\"\n Creates the 'updates' table if it doesn't already exist.\n \"\"\"\n query = '''\n CREATE TABLE IF NOT EXISTS updates (\n id INT AUTO_INCREMENT PRIMARY KEY,\n discord_id BIGINT,\n status TEXT NOT NULL,\n summarized_status TEXT,\n timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\n time_zone VARCHAR(255),\n FOREIGN KEY (discord_id) REFERENCES team_members(discord_id) ON DELETE CASCADE\n )\n '''\n try:\n self.execute_query(query)\n finally:\n self.close()\n\n def insert_status(self, discord_id: int, status: str, time_zone: str):\n \"\"\"\n Inserts a new status update into the 'updates' 
table.\n\n :param discord_id: The Discord ID of the team member.\n :param status: The status update.\n :param time_zone: The time zone of the user.\n \"\"\"\n # Convert current UTC time to user's local time zone\n utc_now = datetime.utcnow().replace(tzinfo=pytz.utc)\n local_now = utc_now.astimezone(pytz.timezone(time_zone))\n\n query = \"INSERT INTO updates (discord_id, status, timestamp, time_zone) VALUES (%s, %s, %s, %s)\"\n params = (discord_id, status, local_now, time_zone)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n\n def update_summarized_status(self, discord_id: int, summarized_status: str):\n \"\"\"\n Updates the summarized_status for the most recent update for a given user.\n\n :param discord_id: The Discord ID of the team member.\n :param summarized_status: The summarized status update.\n \"\"\"\n query = \"\"\"\n UPDATE updates\n SET summarized_status = %s\n WHERE discord_id = %s\n ORDER BY timestamp DESC\n LIMIT 1\n \"\"\"\n params = (summarized_status, discord_id)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n \n def get_weekly_checkins_count(self, discord_id: int, time_zone: str) -> int:\n \"\"\"\n Fetches the number of check-ins for a given user in the current week.\n\n :param discord_id: The Discord ID of the user.\n :param time_zone: The time zone of the user.\n :return: The count of check-ins in the current week.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor()\n \n # Adjusting the current time to the user's time zone\n local_tz = pytz.timezone(time_zone)\n local_now = datetime.now(local_tz)\n \n # Getting the Monday of the current week in the user's time zone\n monday = local_now - timedelta(days=local_now.weekday())\n monday = monday.replace(hour=0, minute=0, second=0, microsecond=0)\n\n query = \"\"\"\n SELECT COUNT(*) FROM updates\n WHERE discord_id = %s AND timestamp >= %s\n \"\"\"\n params = (discord_id, monday)\n try:\n c.execute(query, params)\n \n row = c.fetchone()\n return row[0] if row else 0\n finally:\n c.close()\n self.close()\n\n def get_statuses_in_date_range(self, discord_id: int, start_date: datetime, end_date: datetime) -> List[str]:\n \"\"\"\n Fetches all raw status updates for a given user within a specified date range.\n\n Args:\n discord_id: The Discord ID of the user.\n start_date: The start date of the date range.\n end_date: The end date of the date range.\n\n Returns:\n A list of raw status updates.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor()\n \n query = \"\"\"\n SELECT summarized_status FROM updates\n WHERE discord_id = %s AND timestamp >= %s AND timestamp <= %s\n \"\"\"\n params = (discord_id, start_date, end_date)\n try:\n c.execute(query, params)\n \n statuses = [row[0] for row in c.fetchall()]\n return statuses\n finally:\n c.close()\n self.close()\n \n def get_all_statuses_for_user(self, discord_id: int) -> List[dict]:\n \"\"\"\n Fetches all status updates (both raw and summarized) for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n\n Returns:\n A list of dictionaries, each containing the status update details for a given record.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor(dictionary=True) # Set dictionary=True to return results as dictionaries\n \n query = \"\"\"\n SELECT id, discord_id, status, summarized_status, timestamp \n FROM updates\n 
WHERE discord_id = %s\n ORDER BY timestamp DESC\n \"\"\"\n params = (discord_id,)\n try:\n c.execute(query, params)\n \n statuses = c.fetchall()\n return statuses\n finally:\n c.close()\n self.close()\n \n def get_last_update_timestamp(self, discord_id: int) -> Tuple[datetime, str]:\n \"\"\"\n Fetches the timestamp and time zone of the last status update for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n\n Returns:\n A tuple containing the timestamp of the last update and its time zone, or (None, None) if there are no updates.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor()\n \n query = \"\"\"\n SELECT timestamp, time_zone FROM updates\n WHERE discord_id = %s\n ORDER BY timestamp DESC\n LIMIT 1\n \"\"\"\n params = (discord_id,)\n try:\n c.execute(query, params)\n \n row = c.fetchone()\n return (row[0], row[1]) if row else (None, None)\n finally:\n c.close()\n self.close()\n \n def delete_newest_status(self, discord_id: int) -> None:\n \"\"\"\n Deletes the most recent status update for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor()\n \n # Fetch the ID of the newest status update for the given user\n query_get_id = \"\"\"\n SELECT id FROM updates\n WHERE discord_id = %s\n ORDER BY timestamp DESC\n LIMIT 1\n \"\"\"\n try:\n c.execute(query_get_id, (discord_id,))\n \n row = c.fetchone()\n if row:\n status_id = row[0]\n \n # Now, delete the status update using its ID\n query_delete = \"\"\"\n DELETE FROM updates WHERE id = %s\n \"\"\"\n c.execute(query_delete, (status_id,))\n \n self.conn.commit()\n finally:\n c.close()\n self.close()" }, { "identifier": "WeeklyPostsDB", "path": "weekly_posts/weekly_posts_db.py", "snippet": "class WeeklyPostsDB(BaseDB):\n \"\"\"\n Database class that handles operations related to the 'weekly_posts' table.\n \"\"\"\n\n def __init__(self, host: str, user: str, password: str, database: str, port: str):\n \"\"\"\n Initializes the WeeklyPostsDB class, connects to the MySQL database,\n and creates the 'weekly_posts' table if it doesn't exist.\n\n :param host: The MySQL host address.\n :param user: The MySQL user.\n :param password: The MySQL password.\n :param database: The MySQL database name.\n :param port: The MySQL port number.\n \"\"\"\n super().__init__(host, user, password, database, port)\n self._create_weekly_posts_table()\n\n def _create_weekly_posts_table(self):\n \"\"\"\n Creates the 'weekly_posts' table if it doesn't already exist.\n \"\"\"\n query = '''\n CREATE TABLE IF NOT EXISTS weekly_posts (\n post_id BIGINT PRIMARY KEY,\n timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP\n );\n '''\n try:\n self.execute_query(query)\n finally:\n self.close()\n\n def get_weekly_post_data(self) -> Optional[Dict[str, datetime.datetime]]:\n \"\"\"\n Fetches the most recent weekly post data from the 'weekly_posts' table.\n\n :return: A dictionary containing the post ID and timestamp, or None if no data exists.\n \"\"\"\n query = \"SELECT post_id, timestamp FROM weekly_posts ORDER BY timestamp DESC LIMIT 1\"\n \n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor()\n try:\n c.execute(query)\n row = c.fetchone()\n\n if row:\n return {'post_id': row[0], 'timestamp': row[1]}\n return None\n finally:\n c.close()\n self.close()\n\n def save_weekly_post_data(self, post_id: int, timestamp: 
datetime.datetime):\n \"\"\"\n Inserts or updates the weekly post data in the 'weekly_posts' table.\n\n :param post_id: The ID of the weekly post.\n :param timestamp: The timestamp of the weekly post.\n \"\"\"\n query = \"\"\"\n INSERT INTO weekly_posts (post_id, timestamp)\n VALUES (%s, %s)\n ON DUPLICATE KEY UPDATE timestamp = %s\n \"\"\"\n params = (post_id, timestamp, timestamp)\n try:\n self.execute_query(query, params)\n finally:\n self.close()" }, { "identifier": "StreaksManager", "path": "streaks/streaks_manager.py", "snippet": "class StreaksManager:\n \"\"\"\n Manages the streaks for team members.\n \"\"\"\n \n def __init__(self, streaks_db: StreaksDB):\n \"\"\"\n Initializes a new StreaksManager instance.\n\n Args:\n streaks_db: The StreaksDB object that handles database operations.\n \"\"\"\n self.streaks_db = streaks_db\n \n def get_streak(self, discord_id: int) -> int:\n \"\"\"\n Fetches the current streak for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n\n Returns:\n The current streak count.\n \"\"\"\n return self.streaks_db.get_streak(discord_id)\n\n def update_streak(self, discord_id: int, new_streak: int):\n \"\"\"\n Updates the streak for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n new_streak: The new streak count.\n \"\"\"\n self.streaks_db.update_streak(discord_id, new_streak)\n \n def reset_streak(self, discord_id: int):\n \"\"\"\n Resets the streak for a given user to zero.\n\n Args:\n discord_id: The Discord ID of the user.\n \"\"\"\n self.streaks_db.update_streak(discord_id, 0)" }, { "identifier": "TeamMemberManager", "path": "team_members/team_member_manager.py", "snippet": "class TeamMemberManager:\n \"\"\"\n Manages operations related to team members.\n \"\"\"\n\n def __init__(self, db: TeamMemberDB):\n \"\"\"\n Initialize a TeamMemberManager object.\n\n :param db: TeamMemberDB object for interacting with the database.\n \"\"\"\n self.db = db\n self.team_members = self.load_team_members()\n\n def load_team_members(self) -> List[TeamMember]:\n \"\"\"\n Load team members from the MySQL database into a list of TeamMember objects.\n\n :return: List of TeamMember objects.\n \"\"\"\n team_members = []\n members_data = self.db.list_all_members()\n\n for member_data in members_data:\n member = TeamMember(\n discord_id=member_data[0],\n time_zone=member_data[2],\n name=member_data[1],\n github_username=member_data[3],\n on_vacation=member_data[4]\n )\n team_members.append(member)\n\n return team_members\n\n def find_member(self, discord_id: int) -> TeamMember:\n \"\"\"\n Find and return a team member by their Discord ID.\n\n :param discord_id: The Discord ID of the team member.\n :return: A TeamMember object if found, otherwise None.\n \"\"\"\n for member in self.team_members:\n if member.discord_id == discord_id:\n return member\n return None\n\n def add_member(self, discord_id: int, name: str, time_zone: str, github_username: str):\n \"\"\"\n Add a new team member to the list and the database.\n\n :param discord_id: The Discord ID of the new member.\n :param name: The name of the new member.\n :param time_zone: The time zone of the new member.\n :param github_username: The GitHub username of the new member.\n \"\"\"\n new_member = TeamMember(discord_id, time_zone, name, github_username)\n self.db.insert_new_member(discord_id, name, time_zone, github_username)\n self.team_members.append(new_member)\n\n def remove_member(self, discord_id: int):\n \"\"\"\n Remove a team member from the list and the database.\n\n :param 
discord_id: The Discord ID of the member to remove.\n \"\"\"\n self.db.remove_member(discord_id)\n self.team_members = [member for member in self.team_members if member.discord_id != discord_id]\n\n def update_member_timezone(self, discord_id: int, new_time_zone: str):\n \"\"\"\n Update the timezone of a team member in the database and the list.\n\n :param discord_id: The Discord ID of the member to update.\n :param new_time_zone: The new timezone string to set for the member.\n \"\"\"\n # Update the timezone in the database\n self.db.update_member_timezone(discord_id, new_time_zone)\n\n # Find the member in the team_members list and update their timezone\n member = self.find_member(discord_id)\n if member:\n member.time_zone = new_time_zone\n\n def set_member_vacation_status(self, discord_id: int, on_vacation: bool):\n \"\"\"\n Sets the vacation status of a team member.\n\n :param discord_id: The Discord ID of the team member.\n :param on_vacation: The vacation status to be set for the team member.\n \"\"\"\n # Update the vacation status in the database\n self.db.set_vacation_status(discord_id, on_vacation)\n\n # Find the member in the team_members list and update their vacation status\n member = self.find_member(discord_id)\n if member:\n member.on_vacation = on_vacation" }, { "identifier": "UpdatesManager", "path": "updates/updates_manager.py", "snippet": "class UpdatesManager:\n \"\"\"\n Manages status updates for team members.\n \"\"\"\n\n def __init__(self, updates_db: UpdatesDB):\n \"\"\"\n Initializes a new UpdatesManager instance.\n\n Args:\n updates_db: The UpdatesDB object that handles database operations.\n \"\"\"\n self.updates_db = updates_db\n\n def insert_status(self, discord_id: int, status: str, time_zone: str):\n \"\"\"\n Inserts a new status update.\n\n Args:\n discord_id: The Discord ID of the team member.\n status: The status update.\n \"\"\"\n self.updates_db.insert_status(discord_id, status, time_zone)\n\n def update_summarized_status(self, discord_id: int, summarized_status: str):\n \"\"\"\n Updates the summarized status for the most recent update for a given user.\n\n Args:\n discord_id: The Discord ID of the team member.\n summarized_status: The summarized status update.\n \"\"\"\n self.updates_db.update_summarized_status(discord_id, summarized_status)\n\n def get_weekly_checkins_count(self, discord_id: int, time_zone: str) -> int:\n \"\"\"\n Fetches the number of check-ins for a given user in the current week.\n\n Args:\n discord_id: The Discord ID of the user.\n time_zone: The time zone of the user.\n\n Returns:\n The count of check-ins in the current week.\n \"\"\"\n return self.updates_db.get_weekly_checkins_count(discord_id, time_zone)\n \n def get_all_statuses_for_user(self, discord_id: int) -> List[dict]:\n \"\"\"\n Fetches all status updates (both raw and summarized) for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n\n Returns:\n A list of dictionaries, each containing the status update details for a given record.\n \"\"\"\n return self.updates_db.get_all_statuses_for_user(discord_id)\n\n def get_last_update_timestamp(self, discord_id: int) -> Tuple[datetime, str]:\n \"\"\"\n Fetches the timestamp and time zone of the last status update for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n\n Returns:\n A tuple containing the timestamp of the last update and its time zone, or (None, None) if there are no updates.\n \"\"\"\n return self.updates_db.get_last_update_timestamp(discord_id)\n\n def delete_newest_status(self, 
discord_id: int) -> None:\n \"\"\"\n Deletes the most recent status update for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n \"\"\"\n self.updates_db.delete_newest_status(discord_id)\n\n async def generate_daily_summary(self, user_message: str) -> str:\n \"\"\"\n Generates a daily summary of the user's message using a large language model.\n\n Args:\n user_message: The user's message that needs to be summarized.\n\n Returns:\n The summarized message.\n \"\"\"\n # Prepare a system message to guide OpenAI's model\n system_message = \"Please summarize the user's update into two sections: 'Did' for tasks completed yesterday and 'Do' for tasks planned for today.\"\n \n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": user_message}\n ]\n \n # Specify the model engine you want to use\n model_engine = \"gpt-3.5-turbo-1106\"\n \n try:\n # Make an API call to OpenAI's ChatCompletion\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n \n # Extract the generated text\n summarized_message = response['choices'][0]['message']['content'].strip()\n\n return summarized_message\n \n except Exception as e:\n print(f\"An error occurred while generating the summary: {e}\")\n return \"Error in generating summary\"\n\n async def generate_weekly_summary(self, discord_id: int, start_date: datetime, end_date: datetime) -> str:\n \"\"\"\n Generates a weekly summary of the user's status updates using a large language model.\n\n Args:\n discord_id: The Discord ID of the user.\n start_date: The start date of the date range.\n end_date: The end date of the date range.\n\n Returns:\n The summarized weekly status update.\n \"\"\"\n # Fetch all raw status updates for the specified date range using the new method in UpdatesDB\n weekly_statuses = self.updates_db.get_statuses_in_date_range(discord_id, start_date, end_date)\n\n if not weekly_statuses:\n return \"There are no status updates for this week.\"\n \n # Combine all raw statuses into a single string\n combined_statuses = \"\\n\".join(weekly_statuses)\n \n # Prepare a system message to guide OpenAI's model for weekly summary\n system_message = \"Please generate a comprehensive weekly summary based on the provided daily status updates, including only tasks that have been accomplished. 
Ignore tasks that are not in the 'Did' section.\"\n \n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": combined_statuses}\n ]\n \n # Specify the model engine you want to use\n model_engine = \"gpt-4-0613\"\n \n try:\n # Make an API call to OpenAI's ChatCompletion\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n \n # Extract the generated text\n weekly_summary = response['choices'][0]['message']['content'].strip()\n\n return weekly_summary\n \n except Exception as e:\n print(f\"An error occurred while generating the weekly summary: {e}\")\n return \"Error in generating weekly summary\"\n \n async def summarize_technical_updates(self, commit_messages: List[str]) -> str:\n \"\"\"\n Summarizes the technical updates based on commit messages.\n\n Args:\n commit_messages: List of commit messages for the day.\n\n Returns:\n A summarized version of the technical updates.\n \"\"\"\n\n # Combine commit messages into a single string for the LLM\n combined_commits = \"\\n\".join(commit_messages)\n\n # If there are no commit messages, return a default message\n if not combined_commits:\n return \"No technical updates found based on commit messages.\"\n\n # Summarization using LLM\n system_message = \"Please provide a concise summary of the technical updates based on the provided commit messages.\"\n\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": combined_commits}\n ]\n\n model_engine = \"gpt-3.5-turbo-1106\"\n\n try:\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n\n # Extract the generated summary\n summarized_message = response['choices'][0]['message']['content'].strip()\n\n return summarized_message\n\n except Exception as e:\n print(f\"An error occurred while generating the technical summary: {e}\")\n return \"Error in generating technical summary.\"\n\n async def summarize_feedback_and_revisions(self, original_report: str, feedback: str) -> str:\n \"\"\"\n Takes the original report and user feedback and generates a revised summary.\n\n Args:\n original_report: The original summarized report.\n feedback: The user's feedback or suggested edits.\n\n Returns:\n The revised summary.\n \"\"\"\n # Prepare a system message to guide OpenAI's model\n system_message = \"Revise the original report based on the user's feedback.\"\n\n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": f\"Original Report: {original_report}\"},\n {\"role\": \"user\", \"content\": f\"Feedback: {feedback}\"}\n ]\n \n # Specify the model engine you want to use\n model_engine = \"gpt-3.5-turbo-1106\"\n \n try:\n # Make an API call to OpenAI's ChatCompletion\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n \n # Extract the generated text\n revised_summary = response['choices'][0]['message']['content'].strip()\n\n return revised_summary\n \n except Exception as e:\n print(f\"An error occurred while generating the revised summary: {e}\")\n return \"Error in generating revised summary\"\n\n async def summarize_non_technical_updates(self, update: str) -> str:\n \"\"\"\n Summarizes a non-technical update using a large language model.\n\n Args:\n update: The raw non-technical update provided by the user.\n\n Returns:\n The summarized non-technical 
update.\n \"\"\"\n\n # System message to guide the LLM for a concise summary\n system_message = \"Please provide a concise summary of the non-technical update shared by the user.\"\n\n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": update}\n ]\n\n # Specify the model engine you want to use\n model_engine = \"gpt-3.5-turbo-1106\"\n\n try:\n # Make an API call to OpenAI's ChatCompletion\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n\n # Extract the generated summary\n summarized_message = response['choices'][0]['message']['content'].strip()\n\n return summarized_message\n\n except Exception as e:\n print(f\"An error occurred while generating the non-technical summary: {e}\")\n return \"Error in generating summary\"\n\n async def summarize_goals_for_the_day(self, goals: str) -> str:\n \"\"\"\n Summarizes the user's goals for the day using a large language model.\n\n Args:\n goals: The user's raw input on their goals for the day.\n\n Returns:\n The summarized goals for the day.\n \"\"\"\n # Initiate the conversation with the model\n system_message = \"Please provide a concise summary of the user's goals for today.\"\n \n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": goals}\n ]\n \n # Specify the model engine you want to use (this is an example and can be adjusted based on your needs)\n model_engine = \"gpt-3.5-turbo-1106\"\n \n try:\n # Provide user's input and retrieve model's response\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n \n # Extract the generated text\n summarized_goals = response['choices'][0]['message']['content'].strip()\n\n # Return the summary\n return summarized_goals\n \n except Exception as e:\n print(f\"An error occurred while generating the goals summary: {e}\")\n return \"Error in generating goals summary\"\n \n async def evaluate_performance(self, user_message: str) -> str:\n \"\"\"\n Evaluates the performance of the user based on their update.\n\n Args:\n user_message: The user's message that needs to be evaluated.\n\n Returns:\n The evaluation of the user's performance.\n \"\"\"\n # Prepare a system message to guide OpenAI's model\n system_message = \"\"\"\n You are a project manager at a fast-paced tech startup, recognized for providing clear and actionable feedback during stand-up meetings. 
Your role is to evaluate the quality of team members' daily stand-up reports, with a focus on clear communication, comprehensive planning, and problem-solving abilities.\n It is essential to note that team members should neither be penalized nor rewarded for merely mentioning issues; instead, the emphasis should be on the clarity of the report and the quality of strategies proposed to address these issues.\n Your feedback is candid and aimed at encouraging high-quality reporting and effective planning within the startup environment.\n Please provide a two-sentence summary of the stand-up and assign a grade (A, B, C, D, or F) based on the following criteria:\n\n - A: Excellent - The report is exceptionally clear and detailed, with well-defined tasks and a thorough approach to tackling issues, exemplifying the proactive and problem-solving ethos of our startup.\n - B: Good - The report is clear and adequately detailed, outlining tasks and addressing issues with a reasonable approach, indicating a commitment to momentum and resolution.\n - C: Fair - The report is understandable but lacks detail in some areas, with a basic approach to resolving issues, suggesting a need for further strategy development.\n - D: Poor - The report is vague or missing details, with a limited or unclear approach to issues, necessitating better communication and planning skills.\n - F: Fail - The report is missing, overly vague, or lacks a coherent structure, with no apparent approach to issues, reflecting a need for significant improvement in reporting and strategizing.\n\n A comprehensive stand-up report effectively communicates what was done and what is planned, clearly identifies any issues, and connects daily tasks with broader business objectives.\n\n Provide clear and constructive feedback, aiming to foster a culture of excellence and continuous improvement in how we plan and communicate our daily activities.\n \"\"\"\n \n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": user_message}\n ]\n \n # Specify the model engine you want to use\n model_engine = \"gpt-3.5-turbo-1106\"\n \n try:\n # Make an API call to OpenAI's ChatCompletion\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n \n # Extract the generated text\n performance_evaluation = response['choices'][0]['message']['content'].strip()\n\n return performance_evaluation\n \n except Exception as e:\n print(f\"An error occurred while evaluating the performance: {e}\")\n return \"Error in evaluating performance\"" }, { "identifier": "WeeklyPostManager", "path": "weekly_posts/weekly_post_manager.py", "snippet": "class WeeklyPostManager:\n \"\"\"Manages the status post in a Discord channel.\"\"\"\n \n def __init__(self, channel, weekly_posts_db: WeeklyPostsDB):\n \"\"\"\n Initializes a new WeeklyPostManager instance.\n \"\"\"\n self.channel = channel\n self.weekly_posts_db = weekly_posts_db\n self.editable_weekly_post = None\n self.load_weekly_post_data()\n\n def load_weekly_post_data(self):\n \"\"\"\n Load the weekly post data from the database.\n \n This method queries the 'weekly_posts' table to get the ID and timestamp of \n the last weekly post. 
If no data exists, it sets the ID and timestamp to None.\n \"\"\"\n data = self.weekly_posts_db.get_weekly_post_data()\n self.editable_weekly_post_id = data.get('post_id', None)\n self.weekly_post_timestamp = data.get('timestamp', None)\n\n def save_weekly_post_data(self):\n \"\"\"\n Save the weekly post data to the database.\n \n This method inserts or updates the ID and timestamp of the current weekly post \n in the 'weekly_posts' table.\n \"\"\"\n self.weekly_posts_db.save_weekly_post_data(self.editable_weekly_post.id, datetime.now())\n\n async def initialize_post(self, team_members: List[TeamMember]):\n \"\"\"\n Initializes or retrieves the weekly status post on Discord.\n\n This function checks if a valid weekly post already exists for the current week.\n If it does, it retrieves that post. Otherwise, it sends a new message in the Discord\n channel with the list of team members and their statuses.\n\n Args:\n team_members: A list of TeamMember objects to be displayed in the post.\n \"\"\"\n current_week_number = datetime.now().isocalendar()[1]\n saved_week_number = self.weekly_post_timestamp.isocalendar()[1] if self.weekly_post_timestamp else None\n\n # Skip initialization if the post already exists and is for the current week\n if self.editable_weekly_post_id and current_week_number == saved_week_number:\n self.editable_weekly_post = await self.channel.fetch_message(self.editable_weekly_post_id)\n return\n\n utc_now = pytz.utc.localize(datetime.utcnow())\n today_weekday = utc_now.weekday()\n last_monday = utc_now - timedelta(days=today_weekday)\n next_sunday = last_monday + timedelta(days=6)\n\n start_date = self.format_date(last_monday)\n end_date = self.format_date(next_sunday)\n\n # Calculate the max name length for alignment purposes\n max_name_length = max([len(m.name) for m in team_members])\n\n member_list = []\n for m in team_members:\n # Include the streak with the fire emoji if the streak is greater than 0\n streak_str = f\" {m.current_streak}🔥\" if m.current_streak > 0 else \"\"\n\n # Construct the new line for the member with the updated information\n new_line = f\"# `{m.name.ljust(max_name_length)} {'❓' * 5} {streak_str}`\"\n member_list.append(new_line)\n\n member_list_str = '\\n'.join(member_list)\n\n await self.channel.send(f\"# Weekly Status Updates\")\n await self.channel.send(f\"## {start_date} to {end_date}\")\n if member_list_str:\n self.editable_weekly_post = await self.channel.send(f\"{member_list_str}\")\n self.save_weekly_post_data() # Save the ID and timestamp after creating the post\n\n async def rebuild_post(self, team_members: List[TeamMember]):\n \"\"\"\n Rebuilds the entire weekly status post from the team members' data.\n\n Args:\n team_members: A list of TeamMember objects with updated statuses and streaks.\n \"\"\"\n # If there are no team members, delete the post and return\n if not team_members:\n if self.editable_weekly_post:\n await self.editable_weekly_post.delete()\n self.editable_weekly_post = None\n return\n\n # Calculate the max name length for alignment purposes\n max_name_length = max([len(m.name) for m in team_members])\n\n member_list = []\n for m in team_members:\n # Get the streak and number of weekly check-ins for the member\n streak = m.current_streak\n check_ins = m.weekly_checkins\n\n # Generate the marks based on the number of check-ins\n marks = \"✅\" * check_ins + \"❓\" * (5 - check_ins)\n\n # Include the streak with the fire emoji if the streak is greater than 0\n streak_str = f\" {streak}🔥\" if streak > 0 else \"\"\n\n # 
Construct the new line for the member with the updated information\n new_line = f\"# `{m.name.ljust(max_name_length)} {marks} {streak_str}`\"\n member_list.append(new_line)\n\n new_content = '\\n'.join(member_list)\n\n # Update the existing post or create a new one if it doesn't exist\n if self.editable_weekly_post:\n self.editable_weekly_post = await self.editable_weekly_post.edit(content=new_content)\n else:\n self.editable_weekly_post = await self.channel.send(new_content)\n\n # Save the ID and timestamp of the post\n self.save_weekly_post_data()\n\n def format_date(self, dt: datetime) -> str:\n \"\"\"\n Formats a datetime object into a human-readable string.\n\n Args:\n dt: The datetime object to format.\n\n Returns:\n A human-readable date string.\n \"\"\"\n suffix = ['th', 'st', 'nd', 'rd']\n day = int(dt.strftime('%d'))\n if 4 <= day <= 20 or 24 <= day <= 30:\n suffix_index = 0 # use 'th'\n else:\n suffix_index = day % 10 # use 'st', 'nd', 'rd' as appropriate\n\n return dt.strftime(f\"%B {day}{suffix[suffix_index]}\")" }, { "identifier": "Scheduler", "path": "scheduler.py", "snippet": "class Scheduler:\n \"\"\"Scheduler class to manage timed jobs for sending status requests.\n\n Attributes:\n scheduler: The APScheduler object.\n job_ids: A dictionary to store lists of job IDs for each member.\n \"\"\"\n \n def __init__(self) -> None:\n \"\"\"Initialize the Scheduler object and start the APScheduler.\"\"\"\n self.scheduler: AsyncIOScheduler = AsyncIOScheduler()\n self.job_ids: Dict[int, List[str]] = {} # Store job IDs indexed by member's Discord ID\n self.weekly_post_job_id = None # To store the ID of the scheduled weekly post job\n self.scheduler.start()\n\n def add_job(self, func: callable, member: TeamMember, weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, updates_manager: UpdatesManager) -> None:\n \"\"\"Add a new job to the scheduler for a specific team member.\n \n Args:\n func: The function to call when the job is run.\n member: The TeamMember object for whom the job is added.\n \"\"\"\n time_zone = pytz.timezone(member.time_zone)\n \n weekday_trigger = CronTrigger(day_of_week='mon,tue,wed,thu,fri', hour=10, timezone=time_zone)\n weekend_trigger = CronTrigger(day_of_week='sat,sun', hour=11, timezone=time_zone)\n\n weekday_job = self.scheduler.add_job(func, weekday_trigger, args=[member, weekly_post_manager, streaks_manager, updates_manager])\n weekend_job = self.scheduler.add_job(func, weekend_trigger, args=[member, weekly_post_manager, streaks_manager, updates_manager])\n\n self.job_ids.setdefault(member.discord_id, []).extend([weekday_job.id, weekend_job.id])\n\n def remove_job(self, discord_id: int) -> None:\n \"\"\"Remove jobs for a specific team member.\n \n Args:\n discord_id: The Discord ID of the member for whom the job should be removed.\n \"\"\"\n job_ids = self.job_ids.get(discord_id, [])\n for job_id in job_ids:\n self.scheduler.remove_job(job_id)\n\n if discord_id in self.job_ids:\n del self.job_ids[discord_id] # Remove the job IDs from the dictionary\n\n def schedule_weekly_post(self, func: callable, weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, team_members: List[TeamMember]) -> None:\n \"\"\"Schedules the weekly post based on the latest time zone among the team members.\"\"\"\n \n # Determine the latest time zone\n latest_time_zone = max([member.time_zone for member in team_members], key=lambda tz: pytz.timezone(tz).utcoffset(datetime.utcnow()))\n\n # Set the trigger for 9:10 AM in the earliest time zone on 
Monday\n trigger = CronTrigger(day_of_week='mon', hour=9, minute=10, timezone=latest_time_zone)\n\n # Schedule the function with the trigger\n job = self.scheduler.add_job(func, trigger, args=[weekly_post_manager, streaks_manager, team_members])\n self.weekly_post_job_id = job.id\n\n def unschedule_weekly_post(self) -> None:\n \"\"\"Removes the weekly post job from the scheduler.\"\"\"\n if self.weekly_post_job_id:\n self.scheduler.remove_job(self.weekly_post_job_id)\n self.weekly_post_job_id = None\n\n def get_all_scheduled_jobs(self, team_member_manager) -> List[str]:\n \"\"\"Retrieve all scheduled jobs as a list of strings.\"\"\"\n job_descriptions = []\n\n for job in self.scheduler.get_jobs():\n # Determine the associated team member by looking up the job ID in the job_ids dictionary\n member_discord_id = next((discord_id for discord_id, job_ids in self.job_ids.items() if job.id in job_ids), None)\n member_name = team_member_manager.find_member(member_discord_id).name if member_discord_id else \"Unknown\"\n\n # Calculate the remaining time until the next run\n now = datetime.now(job.next_run_time.tzinfo) # Get the current time with the same timezone as the job's next_run_time\n remaining_time = job.next_run_time - now\n remaining_time_str = str(remaining_time).split('.')[0] # Remove the microseconds part\n\n # If this job is the weekly post job\n if job.id == self.weekly_post_job_id:\n job_descriptions.append(f\"ID: {job.id}, Type: Weekly Post, Next Run: {job.next_run_time}, Remaining Time: {remaining_time_str}, Func: {job.func.__name__}\")\n else:\n job_descriptions.append(f\"ID: {job.id}, Member: {member_name}, Next Run: {job.next_run_time}, Remaining Time: {remaining_time_str}, Func: {job.func.__name__}\")\n\n return job_descriptions" }, { "identifier": "TeamMember", "path": "team_members/team_member.py", "snippet": "class TeamMember:\n \"\"\"TeamMember class to store individual team member details.\n \n Attributes:\n discord_id: The Discord ID of the team member.\n time_zone: The time zone in which the team member resides.\n name: The name of the team member.\n github_username: The GitHub username of the team member.\n current_streak: The current streak of daily updates/check-ins of the team member.\n weekly_checkins: The number of check-ins for the current week.\n \"\"\"\n \n def __init__(self, discord_id: int, time_zone: str, name: str, github_username: str,\n current_streak: int = 0, weekly_checkins: int = 0, on_vacation: bool = False) -> None:\n \"\"\"Initialize a new TeamMember object.\n \n Args:\n discord_id: The Discord ID of the team member.\n time_zone: The time zone of the team member.\n name: The name of the team member.\n github_username: The GitHub username of the team member.\n current_streak: The current streak of daily updates/check-ins. Defaults to 0.\n weekly_checkins: The number of check-ins for the current week. 
Defaults to 0.\n \"\"\"\n self.discord_id: int = discord_id\n self.time_zone: str = time_zone\n self.name: str = name\n self.github_username: str = github_username\n self.current_streak: int = current_streak\n self.weekly_checkins: int = weekly_checkins\n self.on_vacation: bool = on_vacation\n \n def update_streak(self, streak: int) -> None:\n \"\"\"Update the current streak of the team member.\n \n Args:\n streak: The new streak count.\n \"\"\"\n self.current_streak = streak\n \n def reset_streak(self) -> None:\n \"\"\"Reset the current streak of the team member to 0.\"\"\"\n self.current_streak = 0\n\n def update_weekly_checkins(self, count: int):\n \"\"\"\n Update the weekly check-ins count.\n\n Args:\n count: The new count of weekly check-ins.\n \"\"\"\n self.weekly_checkins = count\n \n def increment_weekly_checkins(self) -> None:\n \"\"\"Increment the number of check-ins for the current week by 1.\"\"\"\n self.weekly_checkins += 1\n \n def reset_weekly_checkins(self) -> None:\n \"\"\"Reset the number of check-ins for the current week to 0.\"\"\"\n self.weekly_checkins = 0" } ]
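The database helpers in the context above (for example `StreaksDB.update_streak` and `TeamMemberDB.insert_new_member`) all lean on MySQL's `INSERT ... ON DUPLICATE KEY UPDATE` upsert. Stripped of the project's `BaseDB` wrapper, the same pattern with `mysql-connector-python` looks roughly like this; the connection parameters are placeholders.

```python
import mysql.connector

# Placeholder credentials; the bot reads these from environment variables.
conn = mysql.connector.connect(
    host="localhost", user="bot", password="secret", database="standup", port=3306
)

def upsert_streak(discord_id: int, new_streak: int) -> None:
    # Insert a streak row, or update it in place if the user already has one.
    query = (
        "INSERT INTO streaks (discord_id, current_streak) "
        "VALUES (%s, %s) "
        "ON DUPLICATE KEY UPDATE current_streak = %s"
    )
    cursor = conn.cursor()
    try:
        cursor.execute(query, (discord_id, new_streak, new_streak))
        conn.commit()
    finally:
        cursor.close()

upsert_streak(1234567890, 5)
```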
import os import pytz import asyncio import openai import requests from typing import List from dotenv import load_dotenv from datetime import datetime, timedelta from multiprocessing import Process from streaks.streaks_db import StreaksDB from team_members.team_member_db import TeamMemberDB from updates.updates_db import UpdatesDB from weekly_posts.weekly_posts_db import WeeklyPostsDB from streaks.streaks_manager import StreaksManager from team_members.team_member_manager import TeamMemberManager from updates.updates_manager import UpdatesManager from weekly_posts.weekly_post_manager import WeeklyPostManager from scheduler import Scheduler from team_members.team_member import TeamMember from discord.ext import commands, tasks from discord import Intents, DMChannel from flask import Flask from asyncio import Task, ensure_future, CancelledError
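The `Scheduler` snippet above registers one cron job per member, evaluated in that member's own time zone, via APScheduler's `AsyncIOScheduler` and `CronTrigger`. A reduced sketch of that wiring follows; the member name, time zone, and hour are placeholders.

```python
import asyncio
import pytz
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from apscheduler.triggers.cron import CronTrigger

async def request_status(name: str) -> None:
    # Stand-in for the bot's status-request coroutine.
    print(f"Asking {name} for a status update")

async def main() -> None:
    scheduler = AsyncIOScheduler()
    tz = pytz.timezone("Europe/Paris")  # each member gets a trigger in their own zone
    weekday = CronTrigger(day_of_week="mon,tue,wed,thu,fri", hour=10, timezone=tz)
    scheduler.add_job(request_status, weekday, args=["Alice"])
    scheduler.start()
    await asyncio.Event().wait()  # keep the loop alive so scheduled jobs can fire

asyncio.run(main())
```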
12,661
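The weekly status post built by `WeeklyPostManager` is plain fixed-width text inside Discord code spans: per member, a name padded to the longest name, five check-in marks, and an optional streak badge. A small standalone rendering of that formatting:

```python
def format_member_line(name: str, width: int, checkins: int, streak: int) -> str:
    # Five slots per week: a check mark for each check-in, question marks otherwise.
    marks = "✅" * checkins + "❓" * (5 - checkins)
    streak_str = f" {streak}🔥" if streak > 0 else ""
    return f"# `{name.ljust(width)} {marks}{streak_str}`"

print(format_member_line("Alice", width=8, checkins=3, streak=4))
```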
# Import required modules app = Flask(__name__) # Load environment variables from the .env file load_dotenv() # Retrieve bot, guild, and channel tokens from environment variables BOT_TOKEN = os.getenv('DISCORD_BOT_TOKEN') GUILD_TOKEN = int(os.getenv('DISCORD_GUILD_TOKEN')) CHANNEL_TOKEN = int(os.getenv('DISCORD_CHANNEL_TOKEN')) ADMIN_DISCORD_ID = int(os.getenv('ADMIN_DISCORD_ID')) # Retrieve database credentials from environment variables MYSQL_HOST = os.getenv('MYSQL_HOST') MYSQL_USER = os.getenv('MYSQL_USER') MYSQL_PASSWORD = os.getenv('MYSQL_PASSWORD') MYSQL_DB = os.getenv('MYSQL_DB') MYSQL_PORT = os.getenv('MYSQL_PORT') ORG_NAME = os.getenv('GITHUB_ORG_NAME') ORG_TOKEN = os.getenv('GITHUB_ORG_TOKEN') OPENAI_API_KEY = os.getenv('OPENAI_API_KEY') # Initialize bot with default intents intents = Intents.default() intents.members = True intents.message_content = True bot = commands.Bot(command_prefix='!', intents=intents) openai.api_key = OPENAI_API_KEY # TODO: Remove these globals streaks_manager = None weekly_post_manager = None team_member_manager = None updates_manager = None scheduler = None ongoing_status_requests = {} THUMBS_UP_EMOJI = "👍" PENCIL_EMOJI = "✏️" REPORT_SUBMISSION_EMOJI = '📝'
# Import required modules app = Flask(__name__) # Load environment variables from the .env file load_dotenv() # Retrieve bot, guild, and channel tokens from environment variables BOT_TOKEN = os.getenv('DISCORD_BOT_TOKEN') GUILD_TOKEN = int(os.getenv('DISCORD_GUILD_TOKEN')) CHANNEL_TOKEN = int(os.getenv('DISCORD_CHANNEL_TOKEN')) ADMIN_DISCORD_ID = int(os.getenv('ADMIN_DISCORD_ID')) # Retrieve database credentials from environment variables MYSQL_HOST = os.getenv('MYSQL_HOST') MYSQL_USER = os.getenv('MYSQL_USER') MYSQL_PASSWORD = os.getenv('MYSQL_PASSWORD') MYSQL_DB = os.getenv('MYSQL_DB') MYSQL_PORT = os.getenv('MYSQL_PORT') ORG_NAME = os.getenv('GITHUB_ORG_NAME') ORG_TOKEN = os.getenv('GITHUB_ORG_TOKEN') OPENAI_API_KEY = os.getenv('OPENAI_API_KEY') # Initialize bot with default intents intents = Intents.default() intents.members = True intents.message_content = True bot = commands.Bot(command_prefix='!', intents=intents) openai.api_key = OPENAI_API_KEY # TODO: Remove these globals streaks_manager = None weekly_post_manager = None team_member_manager = None updates_manager = None scheduler = None ongoing_status_requests = {} THUMBS_UP_EMOJI = "👍" PENCIL_EMOJI = "✏️" REPORT_SUBMISSION_EMOJI = '📝'
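Most of the summarization helpers in `UpdatesManager` above share one shape: a fixed system prompt, the user's raw text, and a call to the legacy (pre-1.0) `openai.ChatCompletion` API. Reduced to that core, with the API key and model name as placeholders:

```python
import openai

openai.api_key = "sk-..."  # placeholder; the bot loads OPENAI_API_KEY from the environment

def summarize_standup(user_message: str) -> str:
    messages = [
        {"role": "system", "content": "Summarize the update into 'Did' and 'Do' sections."},
        {"role": "user", "content": user_message},
    ]
    try:
        response = openai.ChatCompletion.create(model="gpt-3.5-turbo-1106", messages=messages)
        return response["choices"][0]["message"]["content"].strip()
    except Exception as exc:
        print(f"An error occurred while generating the summary: {exc}")
        return "Error in generating summary"
```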
async def weekly_state_reset(weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, team_members: List[TeamMember]):
9
2023-10-12 02:01:46+00:00
16k
azuline/rose
rose/tracks_test.py
[ { "identifier": "AudioTags", "path": "rose/audiotags.py", "snippet": "class AudioTags:\n id: str | None\n release_id: str | None\n title: str | None\n year: int | None\n tracknumber: str | None\n tracktotal: int | None\n discnumber: str | None\n disctotal: int | None\n album: str | None\n genre: list[str]\n label: list[str]\n releasetype: str\n\n albumartists: ArtistMapping\n trackartists: ArtistMapping\n\n duration_sec: int\n\n path: Path\n\n @classmethod\n def from_file(cls, p: Path) -> AudioTags:\n \"\"\"Read the tags of an audio file on disk.\"\"\"\n if not any(p.suffix.lower() == ext for ext in SUPPORTED_AUDIO_EXTENSIONS):\n raise UnsupportedFiletypeError(f\"{p.suffix} not a supported filetype\")\n try:\n m = mutagen.File(p) # type: ignore\n except mutagen.MutagenError as e: # type: ignore\n raise UnsupportedFiletypeError(f\"Failed to open file: {e}\") from e\n if isinstance(m, mutagen.mp3.MP3):\n # ID3 returns trackno/discno tags as no/total. We have to parse.\n tracknumber = discnumber = tracktotal = disctotal = None\n if tracknos := _get_tag(m.tags, [\"TRCK\"]):\n try:\n tracknumber, tracktotalstr = tracknos.split(\"/\", 1)\n tracktotal = _parse_int(tracktotalstr)\n except ValueError:\n tracknumber = tracknos\n if discnos := _get_tag(m.tags, [\"TPOS\"]):\n try:\n discnumber, disctotalstr = discnos.split(\"/\", 1)\n disctotal = _parse_int(disctotalstr)\n except ValueError:\n discnumber = discnos\n\n def _get_paired_frame(x: str) -> str | None:\n if not m.tags:\n return None\n for tag in [\"TIPL\", \"IPLS\"]:\n try:\n frame = m.tags[tag]\n except KeyError:\n continue\n return r\" \\\\ \".join([p[1] for p in frame.people if p[0].lower() == x.lower()])\n return None\n\n return AudioTags(\n id=_get_tag(m.tags, [\"TXXX:ROSEID\"]),\n release_id=_get_tag(m.tags, [\"TXXX:ROSERELEASEID\"]),\n title=_get_tag(m.tags, [\"TIT2\"]),\n year=_parse_year(_get_tag(m.tags, [\"TDRC\", \"TYER\"])),\n tracknumber=tracknumber,\n tracktotal=tracktotal,\n discnumber=discnumber,\n disctotal=disctotal,\n album=_get_tag(m.tags, [\"TALB\"]),\n genre=_split_tag(_get_tag(m.tags, [\"TCON\"], split=True)),\n label=_split_tag(_get_tag(m.tags, [\"TPUB\"], split=True)),\n releasetype=_normalize_rtype(_get_tag(m.tags, [\"TXXX:RELEASETYPE\"], first=True)),\n albumartists=parse_artist_string(main=_get_tag(m.tags, [\"TPE2\"], split=True)),\n trackartists=parse_artist_string(\n main=_get_tag(m.tags, [\"TPE1\"], split=True),\n remixer=_get_tag(m.tags, [\"TPE4\"], split=True),\n composer=_get_tag(m.tags, [\"TCOM\"], split=True),\n conductor=_get_tag(m.tags, [\"TPE3\"], split=True),\n producer=_get_paired_frame(\"producer\"),\n dj=_get_paired_frame(\"DJ-mix\"),\n ),\n duration_sec=round(m.info.length),\n path=p,\n )\n if isinstance(m, mutagen.mp4.MP4):\n tracknumber = discnumber = tracktotal = disctotal = None\n with contextlib.suppress(ValueError):\n tracknumber, tracktotalstr = _get_tuple_tag(m.tags, [\"trkn\"]) # type: ignore\n tracktotal = _parse_int(tracktotalstr)\n with contextlib.suppress(ValueError):\n discnumber, disctotalstr = _get_tuple_tag(m.tags, [\"disk\"]) # type: ignore\n disctotal = _parse_int(disctotalstr)\n\n return AudioTags(\n id=_get_tag(m.tags, [\"----:net.sunsetglow.rose:ID\"]),\n release_id=_get_tag(m.tags, [\"----:net.sunsetglow.rose:RELEASEID\"]),\n title=_get_tag(m.tags, [\"\\xa9nam\"]),\n year=_parse_year(_get_tag(m.tags, [\"\\xa9day\"])),\n tracknumber=str(tracknumber),\n tracktotal=tracktotal,\n discnumber=str(discnumber),\n disctotal=disctotal,\n album=_get_tag(m.tags, [\"\\xa9alb\"]),\n 
genre=_split_tag(_get_tag(m.tags, [\"\\xa9gen\"], split=True)),\n label=_split_tag(_get_tag(m.tags, [\"----:com.apple.iTunes:LABEL\"], split=True)),\n releasetype=_normalize_rtype(\n _get_tag(m.tags, [\"----:com.apple.iTunes:RELEASETYPE\"], first=True)\n ),\n albumartists=parse_artist_string(main=_get_tag(m.tags, [\"aART\"], split=True)),\n trackartists=parse_artist_string(\n main=_get_tag(m.tags, [\"\\xa9ART\"], split=True),\n remixer=_get_tag(m.tags, [\"----:com.apple.iTunes:REMIXER\"], split=True),\n producer=_get_tag(m.tags, [\"----:com.apple.iTunes:PRODUCER\"], split=True),\n composer=_get_tag(m.tags, [\"\\xa9wrt\"], split=True),\n conductor=_get_tag(m.tags, [\"----:com.apple.iTunes:CONDUCTOR\"], split=True),\n dj=_get_tag(m.tags, [\"----:com.apple.iTunes:DJMIXER\"], split=True),\n ),\n duration_sec=round(m.info.length), # type: ignore\n path=p,\n )\n if isinstance(m, (mutagen.flac.FLAC, mutagen.oggvorbis.OggVorbis, mutagen.oggopus.OggOpus)):\n return AudioTags(\n id=_get_tag(m.tags, [\"roseid\"]),\n release_id=_get_tag(m.tags, [\"rosereleaseid\"]),\n title=_get_tag(m.tags, [\"title\"]),\n year=_parse_year(_get_tag(m.tags, [\"date\", \"year\"])),\n tracknumber=_get_tag(m.tags, [\"tracknumber\"], first=True),\n tracktotal=_parse_int(_get_tag(m.tags, [\"tracktotal\"], first=True)),\n discnumber=_get_tag(m.tags, [\"discnumber\"], first=True),\n disctotal=_parse_int(_get_tag(m.tags, [\"disctotal\"], first=True)),\n album=_get_tag(m.tags, [\"album\"]),\n genre=_split_tag(_get_tag(m.tags, [\"genre\"], split=True)),\n label=_split_tag(\n _get_tag(m.tags, [\"organization\", \"label\", \"recordlabel\"], split=True)\n ),\n releasetype=_normalize_rtype(_get_tag(m.tags, [\"releasetype\"], first=True)),\n albumartists=parse_artist_string(\n main=_get_tag(m.tags, [\"albumartist\"], split=True)\n ),\n trackartists=parse_artist_string(\n main=_get_tag(m.tags, [\"artist\"], split=True),\n remixer=_get_tag(m.tags, [\"remixer\"], split=True),\n producer=_get_tag(m.tags, [\"producer\"], split=True),\n composer=_get_tag(m.tags, [\"composer\"], split=True),\n conductor=_get_tag(m.tags, [\"conductor\"], split=True),\n dj=_get_tag(m.tags, [\"djmixer\"], split=True),\n ),\n duration_sec=round(m.info.length), # type: ignore\n path=p,\n )\n raise UnsupportedFiletypeError(f\"{p} is not a supported audio file\")\n\n @no_type_check\n def flush(self, *, validate: bool = True) -> None:\n \"\"\"Flush the current tags to the file on disk.\"\"\"\n m = mutagen.File(self.path)\n if not validate and \"pytest\" not in sys.modules:\n raise Exception(\"Validate can only be turned off by tests.\")\n\n self.releasetype = (self.releasetype or \"unknown\").lower()\n if validate and self.releasetype not in SUPPORTED_RELEASE_TYPES:\n raise UnsupportedTagValueTypeError(\n f\"Release type {self.releasetype} is not a supported release type.\\n\"\n f\"Supported release types: {', '.join(SUPPORTED_RELEASE_TYPES)}\"\n )\n\n if isinstance(m, mutagen.mp3.MP3):\n if m.tags is None:\n m.tags = mutagen.id3.ID3()\n\n def _write_standard_tag(key: str, value: str | None) -> None:\n m.tags.delall(key)\n frame = getattr(mutagen.id3, key)(text=value)\n if value:\n m.tags.add(frame)\n\n def _write_tag_with_description(name: str, value: str | None) -> None:\n key, desc = name.split(\":\", 1)\n # Since the ID3 tags work with the shared prefix key before `:`, manually preserve\n # the other tags with the shared prefix key.\n keep_fields = [f for f in m.tags.getall(key) if getattr(f, \"desc\", None) != desc]\n m.tags.delall(key)\n if value:\n frame = 
getattr(mutagen.id3, key)(desc=desc, text=value)\n m.tags.add(frame)\n for f in keep_fields:\n m.tags.add(f)\n\n _write_tag_with_description(\"TXXX:ROSEID\", self.id)\n _write_tag_with_description(\"TXXX:ROSERELEASEID\", self.release_id)\n _write_standard_tag(\"TIT2\", self.title)\n _write_standard_tag(\"TDRC\", str(self.year).zfill(4))\n _write_standard_tag(\"TRCK\", self.tracknumber)\n _write_standard_tag(\"TPOS\", self.discnumber)\n _write_standard_tag(\"TALB\", self.album)\n _write_standard_tag(\"TCON\", \";\".join(self.genre))\n _write_standard_tag(\"TPUB\", \";\".join(self.label))\n _write_tag_with_description(\"TXXX:RELEASETYPE\", self.releasetype)\n _write_standard_tag(\"TPE2\", format_artist_string(self.albumartists))\n _write_standard_tag(\"TPE1\", format_artist_string(self.trackartists))\n # Wipe the alt. role artist tags, since we encode the full artist into the main tag.\n m.tags.delall(\"TPE4\")\n m.tags.delall(\"TCOM\")\n m.tags.delall(\"TPE3\")\n # Delete all paired text frames, since these represent additional artist roles. We don't\n # want to preserve them.\n m.tags.delall(\"TIPL\")\n m.tags.delall(\"IPLS\")\n m.save()\n return\n if isinstance(m, mutagen.mp4.MP4):\n if m.tags is None:\n m.tags = mutagen.mp4.MP4Tags()\n m.tags[\"----:net.sunsetglow.rose:ID\"] = (self.id or \"\").encode()\n m.tags[\"----:net.sunsetglow.rose:RELEASEID\"] = (self.release_id or \"\").encode()\n m.tags[\"\\xa9nam\"] = self.title or \"\"\n m.tags[\"\\xa9day\"] = str(self.year).zfill(4)\n m.tags[\"\\xa9alb\"] = self.album or \"\"\n m.tags[\"\\xa9gen\"] = \";\".join(self.genre)\n m.tags[\"----:com.apple.iTunes:LABEL\"] = \";\".join(self.label).encode()\n m.tags[\"----:com.apple.iTunes:RELEASETYPE\"] = self.releasetype.encode()\n m.tags[\"aART\"] = format_artist_string(self.albumartists)\n m.tags[\"\\xa9ART\"] = format_artist_string(self.trackartists)\n # Wipe the alt. role artist tags, since we encode the full artist into the main tag.\n with contextlib.suppress(KeyError):\n del m.tags[\"----:com.apple.iTunes:REMIXER\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"----:com.apple.iTunes:PRODUCER\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"\\xa9wrt\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"----:com.apple.iTunes:CONDUCTOR\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"----:com.apple.iTunes:DJMIXER\"]\n\n # The track and disc numbers in MP4 are a bit annoying, because they must be a\n # single-element list of 2-tuple ints. We preserve the previous tracktotal/disctotal (as\n # Rose does not care about those values), and then attempt to write our own tracknumber\n # and discnumber.\n try:\n prev_tracktotal = m.tags[\"trkn\"][0][1]\n except (KeyError, IndexError):\n prev_tracktotal = 1\n try:\n prev_disctotal = m.tags[\"disk\"][0][1]\n except (KeyError, IndexError):\n prev_disctotal = 1\n try:\n m.tags[\"trkn\"] = [(int(self.tracknumber or \"0\"), prev_tracktotal)]\n m.tags[\"disk\"] = [(int(self.discnumber or \"0\"), prev_disctotal)]\n except ValueError as e:\n raise UnsupportedTagValueTypeError(\n \"Could not write m4a trackno/discno tags: must be integers. 
\"\n f\"Got: {self.tracknumber=} / {self.discnumber=}\"\n ) from e\n\n m.save()\n return\n if isinstance(m, (mutagen.flac.FLAC, mutagen.oggvorbis.OggVorbis, mutagen.oggopus.OggOpus)):\n if m.tags is None:\n if isinstance(m, mutagen.flac.FLAC):\n m.tags = mutagen.flac.VCFLACDict()\n elif isinstance(m, mutagen.oggvorbis.OggVorbis):\n m.tags = mutagen.oggvorbis.OggVCommentDict()\n else:\n m.tags = mutagen.oggopus.OggOpusVComment()\n assert not isinstance(m.tags, mutagen.flac.MetadataBlock)\n m.tags[\"roseid\"] = self.id or \"\"\n m.tags[\"rosereleaseid\"] = self.release_id or \"\"\n m.tags[\"title\"] = self.title or \"\"\n m.tags[\"date\"] = str(self.year).zfill(4)\n m.tags[\"tracknumber\"] = self.tracknumber or \"\"\n m.tags[\"discnumber\"] = self.discnumber or \"\"\n m.tags[\"album\"] = self.album or \"\"\n m.tags[\"genre\"] = \";\".join(self.genre)\n m.tags[\"organization\"] = \";\".join(self.label)\n m.tags[\"releasetype\"] = self.releasetype\n m.tags[\"albumartist\"] = format_artist_string(self.albumartists)\n m.tags[\"artist\"] = format_artist_string(self.trackartists)\n # Wipe the alt. role artist tags, since we encode the full artist into the main tag.\n with contextlib.suppress(KeyError):\n del m.tags[\"remixer\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"producer\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"composer\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"conductor\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"djmixer\"]\n m.save()\n return\n\n raise RoseError(f\"Impossible: unknown mutagen type: {type(m)=} ({repr(m)=})\")" }, { "identifier": "Config", "path": "rose/config.py", "snippet": "class Config:\n music_source_dir: Path\n fuse_mount_dir: Path\n cache_dir: Path\n # Maximum parallel processes for cache updates. Defaults to nproc/2.\n max_proc: int\n ignore_release_directories: list[str]\n\n # A map from parent artist -> subartists.\n artist_aliases_map: dict[str, list[str]]\n # A map from subartist -> parent artists.\n artist_aliases_parents_map: dict[str, list[str]]\n\n fuse_artists_whitelist: list[str] | None\n fuse_genres_whitelist: list[str] | None\n fuse_labels_whitelist: list[str] | None\n fuse_artists_blacklist: list[str] | None\n fuse_genres_blacklist: list[str] | None\n fuse_labels_blacklist: list[str] | None\n\n cover_art_stems: list[str]\n valid_art_exts: list[str]\n\n rename_source_files: bool\n path_templates: PathTemplateConfig\n\n stored_metadata_rules: list[MetadataRule]\n\n @classmethod\n def parse(cls, config_path_override: Path | None = None) -> Config:\n # As we parse, delete consumed values from the data dictionary. 
If any are left over at the\n # end of the config, warn that unknown config keys were found.\n cfgpath = config_path_override or CONFIG_PATH\n cfgtext = \"\"\n try:\n with cfgpath.open(\"r\") as fp:\n cfgtext = fp.read()\n data = tomllib.loads(cfgtext)\n except FileNotFoundError as e:\n raise ConfigNotFoundError(f\"Configuration file not found ({cfgpath})\") from e\n except tomllib.TOMLDecodeError as e:\n raise ConfigDecodeError(\n f\"Failed to decode configuration file: invalid TOML: {e}\"\n ) from e\n\n try:\n music_source_dir = Path(data[\"music_source_dir\"]).expanduser()\n del data[\"music_source_dir\"]\n except KeyError as e:\n raise MissingConfigKeyError(\n f\"Missing key music_source_dir in configuration file ({cfgpath})\"\n ) from e\n except (ValueError, TypeError) as e:\n raise InvalidConfigValueError(\n f\"Invalid value for music_source_dir in configuration file ({cfgpath}): must be a path\"\n ) from e\n\n try:\n fuse_mount_dir = Path(data[\"fuse_mount_dir\"]).expanduser()\n del data[\"fuse_mount_dir\"]\n except KeyError as e:\n raise MissingConfigKeyError(\n f\"Missing key fuse_mount_dir in configuration file ({cfgpath})\"\n ) from e\n except (ValueError, TypeError) as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_mount_dir in configuration file ({cfgpath}): must be a path\"\n ) from e\n\n try:\n cache_dir = Path(data[\"cache_dir\"]).expanduser()\n del data[\"cache_dir\"]\n except KeyError:\n cache_dir = XDG_CACHE_ROSE\n except (TypeError, ValueError) as e:\n raise InvalidConfigValueError(\n f\"Invalid value for cache_dir in configuration file ({cfgpath}): must be a path\"\n ) from e\n cache_dir.mkdir(parents=True, exist_ok=True)\n\n try:\n max_proc = int(data[\"max_proc\"])\n del data[\"max_proc\"]\n if max_proc <= 0:\n raise ValueError(f\"must be a positive integer: got {max_proc}\")\n except KeyError:\n max_proc = max(1, multiprocessing.cpu_count() // 2)\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for max_proc in configuration file ({cfgpath}): must be a positive integer\"\n ) from e\n\n artist_aliases_map: dict[str, list[str]] = defaultdict(list)\n artist_aliases_parents_map: dict[str, list[str]] = defaultdict(list)\n try:\n for entry in data.get(\"artist_aliases\", []):\n if not isinstance(entry[\"artist\"], str):\n raise ValueError(f\"Artists must be of type str: got {type(entry['artist'])}\")\n artist_aliases_map[entry[\"artist\"]] = entry[\"aliases\"]\n if not isinstance(entry[\"aliases\"], list):\n raise ValueError(\n f\"Aliases must be of type list[str]: got {type(entry['aliases'])}\"\n )\n for s in entry[\"aliases\"]:\n if not isinstance(s, str):\n raise ValueError(f\"Each alias must be of type str: got {type(s)}\")\n artist_aliases_parents_map[s].append(entry[\"artist\"])\n with contextlib.suppress(KeyError):\n del data[\"artist_aliases\"]\n except (ValueError, TypeError, KeyError) as e:\n raise InvalidConfigValueError(\n f\"Invalid value for artist_aliases in configuration file ({cfgpath}): must be a list of {{ artist = str, aliases = list[str] }} records\"\n ) from e\n\n try:\n fuse_artists_whitelist = data[\"fuse_artists_whitelist\"]\n del data[\"fuse_artists_whitelist\"]\n if not isinstance(fuse_artists_whitelist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_artists_whitelist)}\")\n for s in fuse_artists_whitelist:\n if not isinstance(s, str):\n raise ValueError(f\"Each artist must be of type str: got {type(s)}\")\n except KeyError:\n fuse_artists_whitelist = None\n except ValueError as e:\n 
raise InvalidConfigValueError(\n f\"Invalid value for fuse_artists_whitelist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n fuse_genres_whitelist = data[\"fuse_genres_whitelist\"]\n del data[\"fuse_genres_whitelist\"]\n if not isinstance(fuse_genres_whitelist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_genres_whitelist)}\")\n for s in fuse_genres_whitelist:\n if not isinstance(s, str):\n raise ValueError(f\"Each genre must be of type str: got {type(s)}\")\n except KeyError:\n fuse_genres_whitelist = None\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_genres_whitelist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n fuse_labels_whitelist = data[\"fuse_labels_whitelist\"]\n del data[\"fuse_labels_whitelist\"]\n if not isinstance(fuse_labels_whitelist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_labels_whitelist)}\")\n for s in fuse_labels_whitelist:\n if not isinstance(s, str):\n raise ValueError(f\"Each label must be of type str: got {type(s)}\")\n except KeyError:\n fuse_labels_whitelist = None\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_labels_whitelist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n fuse_artists_blacklist = data[\"fuse_artists_blacklist\"]\n del data[\"fuse_artists_blacklist\"]\n if not isinstance(fuse_artists_blacklist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_artists_blacklist)}\")\n for s in fuse_artists_blacklist:\n if not isinstance(s, str):\n raise ValueError(f\"Each artist must be of type str: got {type(s)}\")\n except KeyError:\n fuse_artists_blacklist = None\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_artists_blacklist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n fuse_genres_blacklist = data[\"fuse_genres_blacklist\"]\n del data[\"fuse_genres_blacklist\"]\n if not isinstance(fuse_genres_blacklist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_genres_blacklist)}\")\n for s in fuse_genres_blacklist:\n if not isinstance(s, str):\n raise ValueError(f\"Each genre must be of type str: got {type(s)}\")\n except KeyError:\n fuse_genres_blacklist = None\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_genres_blacklist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n fuse_labels_blacklist = data[\"fuse_labels_blacklist\"]\n del data[\"fuse_labels_blacklist\"]\n if not isinstance(fuse_labels_blacklist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_labels_blacklist)}\")\n for s in fuse_labels_blacklist:\n if not isinstance(s, str):\n raise ValueError(f\"Each label must be of type str: got {type(s)}\")\n except KeyError:\n fuse_labels_blacklist = None\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_labels_blacklist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n if fuse_artists_whitelist and fuse_artists_blacklist:\n raise InvalidConfigValueError(\n f\"Cannot specify both fuse_artists_whitelist and fuse_artists_blacklist in configuration file ({cfgpath}): must specify only one or the other\"\n )\n if fuse_genres_whitelist and fuse_genres_blacklist:\n raise InvalidConfigValueError(\n f\"Cannot specify both fuse_genres_whitelist and fuse_genres_blacklist in configuration file ({cfgpath}): must specify only one or the other\"\n )\n if fuse_labels_whitelist and fuse_labels_blacklist:\n raise 
InvalidConfigValueError(\n f\"Cannot specify both fuse_labels_whitelist and fuse_labels_blacklist in configuration file ({cfgpath}): must specify only one or the other\"\n )\n\n try:\n cover_art_stems = data[\"cover_art_stems\"]\n del data[\"cover_art_stems\"]\n if not isinstance(cover_art_stems, list):\n raise ValueError(f\"Must be a list[str]: got {type(cover_art_stems)}\")\n for s in cover_art_stems:\n if not isinstance(s, str):\n raise ValueError(f\"Each cover art stem must be of type str: got {type(s)}\")\n except KeyError:\n cover_art_stems = [\"folder\", \"cover\", \"art\", \"front\"]\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for cover_art_stems in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n valid_art_exts = data[\"valid_art_exts\"]\n del data[\"valid_art_exts\"]\n if not isinstance(valid_art_exts, list):\n raise ValueError(f\"Must be a list[str]: got {type(valid_art_exts)}\")\n for s in valid_art_exts:\n if not isinstance(s, str):\n raise ValueError(f\"Each art extension must be of type str: got {type(s)}\")\n except KeyError:\n valid_art_exts = [\"jpg\", \"jpeg\", \"png\"]\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for valid_art_exts in configuration file ({cfgpath}): {e}\"\n ) from e\n\n cover_art_stems = [x.lower() for x in cover_art_stems]\n valid_art_exts = [x.lower() for x in valid_art_exts]\n\n try:\n rename_source_files = data[\"rename_source_files\"]\n del data[\"rename_source_files\"]\n if not isinstance(rename_source_files, bool):\n raise ValueError(f\"Must be a bool: got {type(rename_source_files)}\")\n except KeyError:\n rename_source_files = False\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for rename_source_files in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n ignore_release_directories = data[\"ignore_release_directories\"]\n del data[\"ignore_release_directories\"]\n if not isinstance(ignore_release_directories, list):\n raise ValueError(f\"Must be a list[str]: got {type(ignore_release_directories)}\")\n for s in ignore_release_directories:\n if not isinstance(s, str):\n raise ValueError(f\"Each release directory must be of type str: got {type(s)}\")\n except KeyError:\n ignore_release_directories = []\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for ignore_release_directories in configuration file ({cfgpath}): {e}\"\n ) from e\n\n stored_metadata_rules: list[MetadataRule] = []\n for d in data.get(\"stored_metadata_rules\", []):\n if not isinstance(d, dict):\n raise InvalidConfigValueError(\n f\"Invalid value in stored_metadata_rules in configuration file ({cfgpath}): list values must be a dict: got {type(d)}\"\n )\n\n try:\n matcher = d[\"matcher\"]\n except KeyError as e:\n raise InvalidConfigValueError(\n f\"Missing key `matcher` in stored_metadata_rules in configuration file ({cfgpath}): rule {d}\"\n ) from e\n if not isinstance(matcher, str):\n raise InvalidConfigValueError(\n f\"Invalid value for `matcher` in stored_metadata_rules in configuration file ({cfgpath}): rule {d}: must be a string\"\n )\n\n try:\n actions = d[\"actions\"]\n except KeyError as e:\n raise InvalidConfigValueError(\n f\"Missing key `actions` in stored_metadata_rules in configuration file ({cfgpath}): rule {d}\"\n ) from e\n if not isinstance(actions, list):\n raise InvalidConfigValueError(\n f\"Invalid value for `actions` in stored_metadata_rules in configuration file ({cfgpath}): rule {d}: must be a list of strings\"\n 
)\n for action in actions:\n if not isinstance(action, str):\n raise InvalidConfigValueError(\n f\"Invalid value for `actions` in stored_metadata_rules in configuration file ({cfgpath}): rule {d}: must be a list of strings: got {type(action)}\"\n )\n\n try:\n stored_metadata_rules.append(MetadataRule.parse(matcher, actions))\n except RuleSyntaxError as e:\n raise InvalidConfigValueError(\n f\"Failed to parse stored_metadata_rules in configuration file ({cfgpath}): rule {d}: {e}\"\n ) from e\n if \"stored_metadata_rules\" in data:\n del data[\"stored_metadata_rules\"]\n\n # Get the potential default template before evaluating the rest.\n default_templates = deepcopy(DEFAULT_TEMPLATE_PAIR)\n with contextlib.suppress(KeyError):\n default_templates.release = PathTemplate(data[\"path_templates\"][\"default\"][\"release\"])\n del data[\"path_templates\"][\"default\"][\"release\"]\n with contextlib.suppress(KeyError):\n default_templates.track = PathTemplate(data[\"path_templates\"][\"default\"][\"track\"])\n del data[\"path_templates\"][\"default\"][\"track\"]\n with contextlib.suppress(KeyError):\n if not data[\"path_templates\"][\"default\"]:\n del data[\"path_templates\"][\"default\"]\n\n path_templates = PathTemplateConfig.with_defaults(default_templates)\n if tmpl_config := data.get(\"path_templates\", None):\n for key in [\n \"source\",\n \"all_releases\",\n \"new_releases\",\n \"recently_added_releases\",\n \"artists\",\n \"genres\",\n \"labels\",\n \"collages\",\n ]:\n with contextlib.suppress(KeyError):\n getattr(path_templates, key).release = PathTemplate(tmpl_config[key][\"release\"])\n del tmpl_config[key][\"release\"]\n with contextlib.suppress(KeyError):\n getattr(path_templates, key).track = PathTemplate(tmpl_config[key][\"track\"])\n del tmpl_config[key][\"track\"]\n with contextlib.suppress(KeyError):\n if not tmpl_config[key]:\n del tmpl_config[key]\n\n with contextlib.suppress(KeyError):\n path_templates.playlists = PathTemplate(tmpl_config[\"playlists\"])\n del tmpl_config[\"playlists\"]\n with contextlib.suppress(KeyError):\n if not data[\"path_templates\"]:\n del data[\"path_templates\"]\n\n try:\n path_templates.parse()\n except InvalidPathTemplateError as e:\n raise InvalidConfigValueError(\n f\"Invalid path template in configuration file ({cfgpath}) for template {e.key}: {e}\"\n ) from e\n\n if data:\n unrecognized_accessors: list[str] = []\n # Do a DFS over the data keys to assemble the map of unknown keys. 
State is a tuple of\n # (\"accessor\", node).\n dfs_state: deque[tuple[str, dict[str, Any]]] = deque([(\"\", data)])\n while dfs_state:\n accessor, node = dfs_state.pop()\n if isinstance(node, dict):\n for k, v in node.items():\n child_accessor = k if not accessor else f\"{accessor}.{k}\"\n dfs_state.append((child_accessor, v))\n continue\n unrecognized_accessors.append(accessor)\n logger.warning(\n f\"Unrecognized options found in configuration file: {', '.join(unrecognized_accessors)}\"\n )\n\n return Config(\n music_source_dir=music_source_dir,\n fuse_mount_dir=fuse_mount_dir,\n cache_dir=cache_dir,\n max_proc=max_proc,\n artist_aliases_map=artist_aliases_map,\n artist_aliases_parents_map=artist_aliases_parents_map,\n fuse_artists_whitelist=fuse_artists_whitelist,\n fuse_genres_whitelist=fuse_genres_whitelist,\n fuse_labels_whitelist=fuse_labels_whitelist,\n fuse_artists_blacklist=fuse_artists_blacklist,\n fuse_genres_blacklist=fuse_genres_blacklist,\n fuse_labels_blacklist=fuse_labels_blacklist,\n cover_art_stems=cover_art_stems,\n valid_art_exts=valid_art_exts,\n path_templates=path_templates,\n rename_source_files=rename_source_files,\n ignore_release_directories=ignore_release_directories,\n stored_metadata_rules=stored_metadata_rules,\n )\n\n @functools.cached_property\n def valid_cover_arts(self) -> list[str]:\n return [s + \".\" + e for s in self.cover_art_stems for e in self.valid_art_exts]\n\n @functools.cached_property\n def cache_database_path(self) -> Path:\n return self.cache_dir / \"cache.sqlite3\"\n\n @functools.cached_property\n def watchdog_pid_path(self) -> Path:\n return self.cache_dir / \"watchdog.pid\"\n\n @functools.cached_property\n def sanitized_artist_aliases_map(self) -> dict[str, list[str]]:\n return {sanitize_dirname(k, False): v for k, v in self.artist_aliases_map.items()}\n\n @functools.cached_property\n def sanitized_artist_aliases_parents_map(self) -> dict[str, list[str]]:\n return {sanitize_dirname(k, False): v for k, v in self.artist_aliases_parents_map.items()}" }, { "identifier": "MetadataAction", "path": "rose/rule_parser.py", "snippet": "class MetadataAction:\n # The behavior of the action, along with behavior-specific parameters.\n behavior: ReplaceAction | SedAction | SplitAction | AddAction | DeleteAction\n # The tags to apply the action on. Defaults to the tag that the pattern matched.\n tags: list[Tag]\n # Only apply the action on values that match this pattern. 
None means that all values are acted\n # upon.\n pattern: MatcherPattern | None = None\n\n def __str__(self) -> str:\n r = \"\"\n r += stringify_tags(self.tags)\n if self.pattern:\n r += \":\" + str(self.pattern)\n if r:\n r += \"::\"\n\n if isinstance(self.behavior, ReplaceAction):\n r += \"replace\"\n elif isinstance(self.behavior, SedAction):\n r += \"sed\"\n elif isinstance(self.behavior, SplitAction):\n r += \"split\"\n elif isinstance(self.behavior, AddAction):\n r += \"add\"\n elif isinstance(self.behavior, DeleteAction):\n r += \"delete\"\n\n if isinstance(self.behavior, ReplaceAction):\n r += \":\" + self.behavior.replacement\n elif isinstance(self.behavior, SedAction):\n r += \":\" + str(self.behavior.src.pattern).replace(\":\", r\"\\:\")\n r += \":\"\n r += self.behavior.dst.replace(\":\", r\"\\:\")\n elif isinstance(self.behavior, SplitAction):\n r += \":\" + self.behavior.delimiter\n return r\n\n @classmethod\n def parse(\n cls,\n raw: str,\n action_number: int | None = None,\n # If there is a matcher for the action, pass it here to set the defaults.\n matcher: MetadataMatcher | None = None,\n ) -> MetadataAction:\n idx = 0\n # Common arguments to feed into Syntax Error.\n err = {\"rule\": raw, \"rule_name\": \"action\"}\n if action_number:\n err[\"rule_name\"] += f\" {action_number}\"\n\n # First, determine whether we have a matcher section or not. The matcher section is optional,\n # but present if there is an unescaped `::`.\n _, action_idx = take(raw, \"::\")\n has_tags_pattern_section = action_idx != len(raw)\n\n # Parse the (optional) tags+pattern section.\n if not has_tags_pattern_section:\n if not matcher:\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=\"Tags/pattern section not found. \"\n \"Must specify tags to modify, since there is no matcher to default to. \"\n \"Make sure you are formatting your action like {tags}:{pattern}::{kind}:{args} (where `:{pattern}` is optional)\",\n )\n tags: list[Tag] = [x for x in matcher.tags if x in MODIFIABLE_TAGS]\n pattern = matcher.pattern.pattern\n case_insensitive = matcher.pattern.case_insensitive\n else:\n # First, parse the tags. If the tag is matched, keep going, otherwise employ the list\n # parsing logic.\n if raw[idx:].startswith(\"matched:\"):\n if not matcher:\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=\"Cannot use `matched` in this context: there is no matcher to default to.\",\n )\n idx += len(\"matched:\")\n tags = [x for x in matcher.tags if x in MODIFIABLE_TAGS]\n pattern = matcher.pattern.pattern\n case_insensitive = matcher.pattern.case_insensitive\n else:\n tags = []\n found_colon = False\n while True:\n for t, resolved in ALL_TAGS.items():\n if not raw[idx:].startswith(t):\n continue\n if raw[idx:][len(t)] not in [\":\", \",\"]:\n continue\n for resolvedtag in resolved:\n if resolvedtag not in MODIFIABLE_TAGS:\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=f\"Invalid tag: {t} is not modifiable.\",\n )\n tags.append(resolvedtag)\n idx += len(t) + 1\n found_colon = raw[idx - 1] == \":\"\n break\n else:\n tags_to_print: list[str] = []\n for t, resolvedtags in ALL_TAGS.items():\n if all(r in MODIFIABLE_TAGS for r in resolvedtags):\n tags_to_print.append(t)\n feedback = f\"Invalid tag: must be one of {{{', '.join(tags_to_print)}}}. The next character after a tag must be ':' or ','.\"\n if matcher:\n feedback = f\"Invalid tag: must be one of matched, {{{', '.join(tags_to_print)}}}. (And if the value is matched, it must be alone.) 
The next character after a tag must be ':' or ','.\"\n raise RuleSyntaxError(**err, index=idx, feedback=feedback)\n if found_colon:\n break\n\n # And now parse the optional pattern. If the next character is a `::`, then we have an\n # explicitly empty pattern, after which we reach the end of the tags+pattern section.\n pattern = None\n case_insensitive = False\n if raw[idx : idx + 2] == \"::\":\n idx += 2\n # Otherwise, if we hit a lone `:`, we've hit the end of the tags+pattern section, but\n # the pattern is not specified. In this case, default to the matcher's pattern, if we\n # have a matcher.\n # hit the end of the matcher, and we should proceed to the action.\n elif raw[idx] == \":\":\n idx += 1\n if matcher and tags == matcher.tags:\n pattern = matcher.pattern.pattern\n # And otherwise, parse the pattern!\n else:\n pattern, fwd = take(raw[idx:], \":\")\n idx += fwd\n # Set an empty pattern to null.\n pattern = pattern or None\n\n # If we don't see the second colon here, that means we are looking at\n # single-character flags. Only check this if pattern is not null though.\n if pattern and raw[idx : idx + 1] != \":\":\n flags, fwd = take(raw[idx:], \":\")\n if not flags:\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=\"No flags specified: Please remove this section (by deleting the colon) or specify one of the supported flags: `i` (case insensitive).\",\n )\n for i, flag in enumerate(flags):\n if flag == \"i\":\n case_insensitive = True\n continue\n raise RuleSyntaxError(\n **err,\n index=idx + i,\n feedback=\"Unrecognized flag: Either you forgot a colon here (to end the matcher), or this is an invalid matcher flag. The only supported flag is `i` (case insensitive).\",\n )\n idx += fwd\n # Skip the second colon. Now we're at the start of the action.\n idx += 1\n\n # Then let's start parsing the action!\n action_kind, fwd = take(raw[idx:], \":\")\n valid_actions = [\n \"replace\",\n \"sed\",\n \"split\",\n \"add\",\n \"delete\",\n ]\n if action_kind not in valid_actions:\n feedback = f\"Invalid action kind: must be one of {{{', '.join(valid_actions)}}}.\"\n if idx == 0 and \":\" in raw:\n feedback += \" If this is pointing at your pattern, you forgot to put :: (double colons) between the matcher section and the action section.\"\n raise RuleSyntaxError(**err, index=idx, feedback=feedback)\n idx += fwd\n\n # Validate that the action type is supported for the given tags.\n if action_kind == \"split\" or action_kind == \"add\":\n single_valued_tags = [t for t in tags if t in SINGLE_VALUE_TAGS]\n if single_valued_tags:\n raise InvalidRuleError(\n f\"Single valued tags {', '.join(single_valued_tags)} cannot be modified by multi-value action {action_kind}\"\n )\n\n # And then parse each action kind separately.\n behavior: ReplaceAction | SedAction | SplitAction | AddAction | DeleteAction\n if action_kind == \"replace\":\n replacement, fwd = take(raw[idx:], \":\", including=False)\n idx += fwd\n if replacement == \"\":\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=\"Replacement not found: must specify a non-empty replacement. Use the delete action to remove a value.\",\n )\n if raw[idx:]:\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=\"Found another section after the replacement, but the replacement must be the last section. 
Perhaps you meant to escape this colon?\",\n )\n behavior = ReplaceAction(replacement=replacement)\n elif action_kind == \"sed\":\n src_str, fwd = take(raw[idx:], \":\", including=False)\n if src_str == \"\":\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=f\"Empty sed pattern found: must specify a non-empty pattern. Example: {raw}:pattern:replacement\",\n )\n try:\n src = re.compile(src_str)\n except re.error as e:\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=f\"Failed to compile the sed pattern regex: invalid pattern: {e}\",\n ) from e\n idx += fwd\n\n if len(raw) == idx or raw[idx] != \":\":\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=f\"Sed replacement not found: must specify a sed replacement section. Example: {raw}:replacement.\",\n )\n idx += 1\n\n dst, fwd = take(raw[idx:], \":\", including=False)\n idx += fwd\n if raw[idx:]:\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=\"Found another section after the sed replacement, but the sed replacement must be the last section. Perhaps you meant to escape this colon?\",\n )\n behavior = SedAction(src=src, dst=dst)\n elif action_kind == \"split\":\n delimiter, fwd = take(raw[idx:], \":\", including=False)\n idx += fwd\n if delimiter == \"\":\n feedback = \"Delimiter not found: must specify a non-empty delimiter to split on.\"\n if len(raw) > idx and raw[idx] == \":\":\n feedback += \" Perhaps you meant to escape this colon?\"\n raise RuleSyntaxError(**err, index=idx, feedback=feedback)\n if raw[idx:]:\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=\"Found another section after the delimiter, but the delimiter must be the last section. Perhaps you meant to escape this colon?\",\n )\n behavior = SplitAction(delimiter=delimiter)\n elif action_kind == \"add\":\n value, fwd = take(raw[idx:], \":\", including=False)\n idx += fwd\n if value == \"\":\n feedback = \"Value not found: must specify a non-empty value to add.\"\n if len(raw) > idx and raw[idx] == \":\":\n feedback += \" Perhaps you meant to escape this colon?\"\n raise RuleSyntaxError(**err, index=idx, feedback=feedback)\n if raw[idx:]:\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=\"Found another section after the value, but the value must be the last section. Perhaps you meant to escape this colon?\",\n )\n behavior = AddAction(value=value)\n elif action_kind == \"delete\":\n if raw[idx:]:\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=\"Found another section after the action kind, but the delete action has no parameters. Please remove this section.\",\n )\n behavior = DeleteAction()\n else: # pragma: no cover\n raise RoseError(f\"Impossible: unknown action_kind {action_kind=}\")\n\n action = MetadataAction(\n behavior=behavior,\n tags=tags,\n pattern=MatcherPattern(pattern=pattern, case_insensitive=case_insensitive)\n if pattern\n else None,\n )\n logger.debug(f\"Parsed rule action {raw=} {matcher=} as {action=}\")\n return action" }, { "identifier": "MetadataMatcher", "path": "rose/rule_parser.py", "snippet": "class MetadataMatcher:\n # Tags to test against the pattern. 
If any tags match the pattern, the action will be ran\n # against the track.\n tags: list[Tag]\n # The pattern to test the tag against.\n pattern: MatcherPattern\n\n def __str__(self) -> str:\n r = stringify_tags(self.tags)\n r += \":\"\n r += str(self.pattern)\n return r\n\n @classmethod\n def parse(cls, raw: str) -> MetadataMatcher:\n idx = 0\n # Common arguments to feed into Syntax Error.\n err = {\"rule_name\": \"matcher\", \"rule\": raw}\n\n # First, parse the tags.\n tags: list[Tag] = []\n found_colon = False\n while True:\n for t, resolved in ALL_TAGS.items():\n if not raw[idx:].startswith(t):\n continue\n try:\n if raw[idx:][len(t)] not in [\":\", \",\"]:\n continue\n except IndexError:\n raise RuleSyntaxError(\n **err,\n index=idx + len(t),\n feedback=\"Expected to find ',' or ':', found end of string.\",\n ) from None\n tags.extend(resolved)\n idx += len(t) + 1\n found_colon = raw[idx - 1] == \":\"\n break\n else:\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=f\"Invalid tag: must be one of {{{', '.join(ALL_TAGS)}}}. The next character after a tag must be ':' or ','.\",\n )\n if found_colon:\n break\n\n # Then parse the pattern.\n pattern, fwd = take(raw[idx:], \":\", including=False)\n idx += fwd\n\n # If more input is remaining, it should be optional single-character flags.\n case_insensitive = False\n if idx < len(raw) and raw[idx] == \":\":\n idx += 1\n flags, fwd = take(raw[idx:], \":\")\n if not flags:\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=\"No flags specified: Please remove this section (by deleting the colon) or specify one of the supported flags: `i` (case insensitive).\",\n )\n for i, flag in enumerate(flags):\n if flag == \"i\":\n case_insensitive = True\n continue\n raise RuleSyntaxError(\n **err,\n index=idx + i,\n feedback=\"Unrecognized flag: Please specify one of the supported flags: `i` (case insensitive).\",\n )\n idx += fwd\n\n if raw[idx:]:\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=\"Extra input found after end of matcher. 
Perhaps you meant to escape this colon?\",\n )\n\n matcher = MetadataMatcher(\n tags=tags,\n pattern=MatcherPattern(pattern=pattern, case_insensitive=case_insensitive),\n )\n logger.debug(f\"Parsed rule matcher {raw=} as {matcher=}\")\n return matcher" }, { "identifier": "dump_track", "path": "rose/tracks.py", "snippet": "def dump_track(c: Config, track_id: str) -> str:\n track = get_track(c, track_id)\n if track is None:\n raise TrackDoesNotExistError(f\"Track {track_id} does not exist\")\n return json.dumps(track.dump())" }, { "identifier": "dump_tracks", "path": "rose/tracks.py", "snippet": "def dump_tracks(c: Config, matcher: MetadataMatcher | None = None) -> str:\n track_ids = None\n if matcher:\n track_ids = [t.id for t in fast_search_for_matching_tracks(c, matcher)]\n tracks = list_tracks(c, track_ids)\n if matcher:\n tracks = filter_track_false_positives_using_read_cache(matcher, tracks)\n return json.dumps([t.dump() for t in tracks])" }, { "identifier": "run_actions_on_track", "path": "rose/tracks.py", "snippet": "def run_actions_on_track(\n c: Config,\n track_id: str,\n actions: list[MetadataAction],\n *,\n dry_run: bool = False,\n confirm_yes: bool = False,\n) -> None:\n \"\"\"Run rule engine actions on a release.\"\"\"\n track = get_track(c, track_id)\n if track is None:\n raise TrackDoesNotExistError(f\"Track {track_id} does not exist\")\n audiotag = AudioTags.from_file(track.source_path)\n execute_metadata_actions(c, actions, [audiotag], dry_run=dry_run, confirm_yes=confirm_yes)" } ]
import json import pytest from pathlib import Path from rose.audiotags import AudioTags from rose.config import Config from rose.rule_parser import MetadataAction, MetadataMatcher from rose.tracks import dump_track, dump_tracks, run_actions_on_track
14284
"producer": [], "remixer": [], }, "discnumber": "01", "disctotal": 1, "duration_seconds": 120, "id": "t4", "source_path": f"{config.music_source_dir}/r3/01.m4a", "tracktitle": "Track 1", "tracknumber": "01", "tracktotal": 1, "added_at": "0000-01-01T00:00:00+00:00", "release_id": "r3", "albumtitle": "Release 3", "releasetype": "album", "year": 2021, "new": True, "genres": [], "labels": [], "albumartists": { "main": [], "guest": [], "remixer": [], "producer": [], "composer": [], "djmixer": [], }, }, ] @pytest.mark.usefixtures("seeded_cache") def test_dump_tracks_with_matcher(config: Config) -> None: matcher = MetadataMatcher.parse("artist:Techno Man") assert json.loads(dump_tracks(config, matcher)) == [ { "trackartists": { "composer": [], "djmixer": [], "guest": [], "main": [ {"alias": False, "name": "Techno Man"}, {"alias": False, "name": "Bass Man"}, ], "producer": [], "remixer": [], }, "discnumber": "01", "disctotal": 1, "duration_seconds": 120, "id": "t1", "source_path": f"{config.music_source_dir}/r1/01.m4a", "tracktitle": "Track 1", "tracknumber": "01", "tracktotal": 2, "added_at": "0000-01-01T00:00:00+00:00", "release_id": "r1", "albumtitle": "Release 1", "releasetype": "album", "year": 2023, "new": False, "genres": ["Techno", "Deep House"], "labels": ["Silk Music"], "albumartists": { "main": [ {"name": "Techno Man", "alias": False}, {"name": "Bass Man", "alias": False}, ], "guest": [], "remixer": [], "producer": [], "composer": [], "djmixer": [], }, }, { "trackartists": { "composer": [], "djmixer": [], "guest": [], "main": [ {"alias": False, "name": "Techno Man"}, {"alias": False, "name": "Bass Man"}, ], "producer": [], "remixer": [], }, "discnumber": "01", "disctotal": 1, "duration_seconds": 240, "id": "t2", "source_path": f"{config.music_source_dir}/r1/02.m4a", "tracktitle": "Track 2", "tracknumber": "02", "tracktotal": 2, "added_at": "0000-01-01T00:00:00+00:00", "release_id": "r1", "albumtitle": "Release 1", "releasetype": "album", "year": 2023, "new": False, "genres": ["Techno", "Deep House"], "labels": ["Silk Music"], "albumartists": { "main": [ {"name": "Techno Man", "alias": False}, {"name": "Bass Man", "alias": False}, ], "guest": [], "remixer": [], "producer": [], "composer": [], "djmixer": [], }, }, ] @pytest.mark.usefixtures("seeded_cache") def test_dump_track(config: Config) -> None:
def test_run_action_on_track(config: Config, source_dir: Path) -> None: action = MetadataAction.parse("tracktitle::replace:Bop") af = AudioTags.from_file(source_dir / "Test Release 2" / "01.m4a") assert af.id is not None run_actions_on_track(config, af.id, [action]) af = AudioTags.from_file(source_dir / "Test Release 2" / "01.m4a") assert af.title == "Bop" @pytest.mark.usefixtures("seeded_cache") def test_dump_tracks(config: Config) -> None: assert json.loads(dump_tracks(config)) == [ { "trackartists": { "composer": [], "djmixer": [], "guest": [], "main": [ {"alias": False, "name": "Techno Man"}, {"alias": False, "name": "Bass Man"}, ], "producer": [], "remixer": [], }, "discnumber": "01", "disctotal": 1, "duration_seconds": 120, "id": "t1", "source_path": f"{config.music_source_dir}/r1/01.m4a", "tracktitle": "Track 1", "tracknumber": "01", "tracktotal": 2, "added_at": "0000-01-01T00:00:00+00:00", "release_id": "r1", "albumtitle": "Release 1", "releasetype": "album", "year": 2023, "new": False, "genres": ["Techno", "Deep House"], "labels": ["Silk Music"], "albumartists": { "main": [ {"name": "Techno Man", "alias": False}, {"name": "Bass Man", "alias": False}, ], "guest": [], "remixer": [], "producer": [], "composer": [], "djmixer": [], }, }, { "trackartists": { "composer": [], "djmixer": [], "guest": [], "main": [ {"alias": False, "name": "Techno Man"}, {"alias": False, "name": "Bass Man"}, ], "producer": [], "remixer": [], }, "discnumber": "01", "disctotal": 1, "duration_seconds": 240, "id": "t2", "source_path": f"{config.music_source_dir}/r1/02.m4a", "tracktitle": "Track 2", "tracknumber": "02", "tracktotal": 2, "added_at": "0000-01-01T00:00:00+00:00", "release_id": "r1", "albumtitle": "Release 1", "releasetype": "album", "year": 2023, "new": False, "genres": ["Techno", "Deep House"], "labels": ["Silk Music"], "albumartists": { "main": [ {"name": "Techno Man", "alias": False}, {"name": "Bass Man", "alias": False}, ], "guest": [], "remixer": [], "producer": [], "composer": [], "djmixer": [], }, }, { "trackartists": { "composer": [], "djmixer": [], "guest": [{"alias": False, "name": "Conductor Woman"}], "main": [{"alias": False, "name": "Violin Woman"}], "producer": [], "remixer": [], }, "discnumber": "01", "disctotal": 1, "duration_seconds": 120, "id": "t3", "source_path": f"{config.music_source_dir}/r2/01.m4a", "tracktitle": "Track 1", "tracknumber": "01", "tracktotal": 1, "added_at": "0000-01-01T00:00:00+00:00", "release_id": "r2", "albumtitle": "Release 2", "releasetype": "album", "year": 2021, "new": False, "genres": ["Classical"], "labels": ["Native State"], "albumartists": { "main": [{"name": "Violin Woman", "alias": False}], "guest": [{"name": "Conductor Woman", "alias": False}], "remixer": [], "producer": [], "composer": [], "djmixer": [], }, }, { "trackartists": { "composer": [], "djmixer": [], "guest": [], "main": [], "producer": [], "remixer": [], }, "discnumber": "01", "disctotal": 1, "duration_seconds": 120, "id": "t4", "source_path": f"{config.music_source_dir}/r3/01.m4a", "tracktitle": "Track 1", "tracknumber": "01", "tracktotal": 1, "added_at": "0000-01-01T00:00:00+00:00", "release_id": "r3", "albumtitle": "Release 3", "releasetype": "album", "year": 2021, "new": True, "genres": [], "labels": [], "albumartists": { "main": [], "guest": [], "remixer": [], "producer": [], "composer": [], "djmixer": [], }, }, ] @pytest.mark.usefixtures("seeded_cache") def test_dump_tracks_with_matcher(config: Config) -> None: matcher = MetadataMatcher.parse("artist:Techno Man") assert 
json.loads(dump_tracks(config, matcher)) == [ { "trackartists": { "composer": [], "djmixer": [], "guest": [], "main": [ {"alias": False, "name": "Techno Man"}, {"alias": False, "name": "Bass Man"}, ], "producer": [], "remixer": [], }, "discnumber": "01", "disctotal": 1, "duration_seconds": 120, "id": "t1", "source_path": f"{config.music_source_dir}/r1/01.m4a", "tracktitle": "Track 1", "tracknumber": "01", "tracktotal": 2, "added_at": "0000-01-01T00:00:00+00:00", "release_id": "r1", "albumtitle": "Release 1", "releasetype": "album", "year": 2023, "new": False, "genres": ["Techno", "Deep House"], "labels": ["Silk Music"], "albumartists": { "main": [ {"name": "Techno Man", "alias": False}, {"name": "Bass Man", "alias": False}, ], "guest": [], "remixer": [], "producer": [], "composer": [], "djmixer": [], }, }, { "trackartists": { "composer": [], "djmixer": [], "guest": [], "main": [ {"alias": False, "name": "Techno Man"}, {"alias": False, "name": "Bass Man"}, ], "producer": [], "remixer": [], }, "discnumber": "01", "disctotal": 1, "duration_seconds": 240, "id": "t2", "source_path": f"{config.music_source_dir}/r1/02.m4a", "tracktitle": "Track 2", "tracknumber": "02", "tracktotal": 2, "added_at": "0000-01-01T00:00:00+00:00", "release_id": "r1", "albumtitle": "Release 1", "releasetype": "album", "year": 2023, "new": False, "genres": ["Techno", "Deep House"], "labels": ["Silk Music"], "albumartists": { "main": [ {"name": "Techno Man", "alias": False}, {"name": "Bass Man", "alias": False}, ], "guest": [], "remixer": [], "producer": [], "composer": [], "djmixer": [], }, }, ] @pytest.mark.usefixtures("seeded_cache") def test_dump_track(config: Config) -> None:
assert json.loads(dump_track(config, "t1")) == {
4
2023-10-09 14:42:23+00:00
16k
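A minimal Python sketch of how the fields of the record above fit together, under the assumption that gold_snippet_index indexes into the record's context list and that the gold snippet's identifier should occur in the ground-truth next_line. The record below is a toy abbreviation of the row above (snippet bodies omitted); it is an illustration, not a verbatim dataset row.

# Minimal sketch, assuming `gold_snippet_index` indexes into `context` and the
# gold snippet's identifier is the helper used on the ground-truth `next_line`.
def gold_identifier_used(record: dict) -> bool:
    gold = record["context"][record["gold_snippet_index"]]
    return gold["identifier"] in record["next_line"]

# Toy record with values abbreviated from the row above (snippet bodies omitted).
record = {
    "context": [
        {"identifier": "AudioTags"},
        {"identifier": "Config"},
        {"identifier": "MetadataAction"},
        {"identifier": "MetadataMatcher"},
        {"identifier": "dump_track"},
        {"identifier": "dump_tracks"},
        {"identifier": "run_actions_on_track"},
    ],
    "gold_snippet_index": 4,
    "next_line": 'assert json.loads(dump_track(config, "t1")) == {',
}

assert gold_identifier_used(record)  # "dump_track" (index 4) appears in next_line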
grainseed/monitask
sam/segment_anything/automatic_mask_generator.py
[ { "identifier": "Sam", "path": "sam/segment_anything/modeling/sam.py", "snippet": "class Sam(nn.Module):\r\n mask_threshold: float = 0.0\r\n image_format: str = \"RGB\"\r\n\r\n def __init__(\r\n self,\r\n image_encoder: ImageEncoderViT,\r\n prompt_encoder: PromptEncoder,\r\n mask_decoder: MaskDecoder,\r\n pixel_mean: List[float] = [123.675, 116.28, 103.53],\r\n pixel_std: List[float] = [58.395, 57.12, 57.375],\r\n ) -> None:\r\n \"\"\"\r\n SAM predicts object masks from an image and input prompts.\r\n\r\n Arguments:\r\n image_encoder (ImageEncoderViT): The backbone used to encode the\r\n image into image embeddings that allow for efficient mask prediction.\r\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\r\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings\r\n and encoded prompts.\r\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\r\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\r\n \"\"\"\r\n super().__init__()\r\n self.image_encoder = image_encoder\r\n self.prompt_encoder = prompt_encoder\r\n self.mask_decoder = mask_decoder\r\n self.register_buffer(\"pixel_mean\", torch.Tensor(pixel_mean).view(-1, 1, 1), False)\r\n self.register_buffer(\"pixel_std\", torch.Tensor(pixel_std).view(-1, 1, 1), False)\r\n\r\n @property\r\n def device(self) -> Any:\r\n return self.pixel_mean.device\r\n\r\n def forward(\r\n self,\r\n batched_input: List[Dict[str, Any]],\r\n multimask_output: bool,\r\n hq_token_only: bool =False,\r\n ) -> List[Dict[str, torch.Tensor]]:\r\n \"\"\"\r\n Predicts masks end-to-end from provided images and prompts.\r\n If prompts are not known in advance, using SamPredictor is\r\n recommended over calling the model directly.\r\n\r\n Arguments:\r\n batched_input (list(dict)): A list over input images, each a\r\n dictionary with the following keys. A prompt key can be\r\n excluded if it is not present.\r\n 'image': The image as a torch tensor in 3xHxW format,\r\n already transformed for input to the model.\r\n 'original_size': (tuple(int, int)) The original size of\r\n the image before transformation, as (H, W).\r\n 'point_coords': (torch.Tensor) Batched point prompts for\r\n this image, with shape BxNx2. Already transformed to the\r\n input frame of the model.\r\n 'point_labels': (torch.Tensor) Batched labels for point prompts,\r\n with shape BxN.\r\n 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\r\n Already transformed to the input frame of the model.\r\n 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\r\n in the form Bx1xHxW.\r\n multimask_output (bool): Whether the model should predict multiple\r\n disambiguating masks, or return a single mask.\r\n\r\n Returns:\r\n (list(dict)): A list over input images, where each element is\r\n as dictionary with the following keys.\r\n 'masks': (torch.Tensor) Batched binary mask predictions,\r\n with shape BxCxHxW, where B is the number of input prompts,\r\n C is determined by multimask_output, and (H, W) is the\r\n original size of the image.\r\n 'iou_predictions': (torch.Tensor) The model's predictions\r\n of mask quality, in shape BxC.\r\n 'low_res_logits': (torch.Tensor) Low resolution logits with\r\n shape BxCxHxW, where H=W=256. 
Can be passed as mask input\r\n to subsequent iterations of prediction.\r\n \"\"\"\r\n input_images = torch.stack([self.preprocess(x[\"image\"]) for x in batched_input], dim=0)\r\n image_embeddings, interm_embeddings = self.image_encoder(input_images)\r\n interm_embeddings = interm_embeddings[0] # early layer\r\n\r\n outputs = []\r\n for image_record, curr_embedding, curr_interm in zip(batched_input, image_embeddings, interm_embeddings):\r\n if \"point_coords\" in image_record:\r\n points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\r\n else:\r\n points = None\r\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\r\n points=points,\r\n boxes=image_record.get(\"boxes\", None),\r\n masks=image_record.get(\"mask_inputs\", None),\r\n )\r\n low_res_masks, iou_predictions = self.mask_decoder(\r\n image_embeddings=curr_embedding.unsqueeze(0),\r\n image_pe=self.prompt_encoder.get_dense_pe(),\r\n sparse_prompt_embeddings=sparse_embeddings,\r\n dense_prompt_embeddings=dense_embeddings,\r\n multimask_output=multimask_output,\r\n hq_token_only=hq_token_only,\r\n interm_embeddings=curr_interm.unsqueeze(0).unsqueeze(0),\r\n )\r\n masks = self.postprocess_masks(\r\n low_res_masks,\r\n input_size=image_record[\"image\"].shape[-2:],\r\n original_size=image_record[\"original_size\"],\r\n )\r\n masks = masks > self.mask_threshold\r\n outputs.append(\r\n {\r\n \"masks\": masks,\r\n \"iou_predictions\": iou_predictions,\r\n \"low_res_logits\": low_res_masks,\r\n }\r\n )\r\n return outputs\r\n\r\n def postprocess_masks(\r\n self,\r\n masks: torch.Tensor,\r\n input_size: Tuple[int, ...],\r\n original_size: Tuple[int, ...],\r\n ) -> torch.Tensor:\r\n \"\"\"\r\n Remove padding and upscale masks to the original image size.\r\n\r\n Arguments:\r\n masks (torch.Tensor): Batched masks from the mask_decoder,\r\n in BxCxHxW format.\r\n input_size (tuple(int, int)): The size of the image input to the\r\n model, in (H, W) format. 
Used to remove padding.\r\n original_size (tuple(int, int)): The original size of the image\r\n before resizing for input to the model, in (H, W) format.\r\n\r\n Returns:\r\n (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\r\n is given by original_size.\r\n \"\"\"\r\n masks = F.interpolate(\r\n masks,\r\n (self.image_encoder.img_size, self.image_encoder.img_size),\r\n mode=\"bilinear\",\r\n align_corners=False,\r\n )\r\n masks = masks[..., : input_size[0], : input_size[1]]\r\n masks = F.interpolate(masks, original_size, mode=\"bilinear\", align_corners=False)\r\n return masks\r\n\r\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\r\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\r\n # Normalize colors\r\n x = (x - self.pixel_mean) / self.pixel_std\r\n\r\n # Pad\r\n h, w = x.shape[-2:]\r\n padh = self.image_encoder.img_size - h\r\n padw = self.image_encoder.img_size - w\r\n x = F.pad(x, (0, padw, 0, padh))\r\n return x\r" }, { "identifier": "SamPredictor", "path": "sam/segment_anything/predictor.py", "snippet": "class SamPredictor:\r\n def __init__(\r\n self,\r\n sam_model: Sam,\r\n ) -> None:\r\n \"\"\"\r\n Uses SAM to calculate the image embedding for an image, and then\r\n allow repeated, efficient mask prediction given prompts.\r\n\r\n Arguments:\r\n sam_model (Sam): The model to use for mask prediction.\r\n \"\"\"\r\n super().__init__()\r\n self.model = sam_model\r\n self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)\r\n self.reset_image()\r\n\r\n def set_image(\r\n self,\r\n image: np.ndarray,\r\n image_format: str = \"RGB\",\r\n ) -> None:\r\n \"\"\"\r\n Calculates the image embeddings for the provided image, allowing\r\n masks to be predicted with the 'predict' method.\r\n\r\n Arguments:\r\n image (np.ndarray): The image for calculating masks. Expects an\r\n image in HWC uint8 format, with pixel values in [0, 255].\r\n image_format (str): The color format of the image, in ['RGB', 'BGR'].\r\n \"\"\"\r\n assert image_format in [\r\n \"RGB\",\r\n \"BGR\",\r\n ], f\"image_format must be in ['RGB', 'BGR'], is {image_format}.\"\r\n # import pdb;pdb.set_trace()\r\n if image_format != self.model.image_format:\r\n image = image[..., ::-1]\r\n\r\n # Transform the image to the form expected by the model\r\n # import pdb;pdb.set_trace()\r\n input_image = self.transform.apply_image(image)\r\n input_image_torch = torch.as_tensor(input_image, device=self.device)\r\n input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :]\r\n\r\n self.set_torch_image(input_image_torch, image.shape[:2])\r\n\r\n @torch.no_grad()\r\n def set_torch_image(\r\n self,\r\n transformed_image: torch.Tensor,\r\n original_image_size: Tuple[int, ...],\r\n ) -> None:\r\n \"\"\"\r\n Calculates the image embeddings for the provided image, allowing\r\n masks to be predicted with the 'predict' method. 
Expects the input\r\n image to be already transformed to the format expected by the model.\r\n\r\n Arguments:\r\n transformed_image (torch.Tensor): The input image, with shape\r\n 1x3xHxW, which has been transformed with ResizeLongestSide.\r\n original_image_size (tuple(int, int)): The size of the image\r\n before transformation, in (H, W) format.\r\n \"\"\"\r\n assert (\r\n len(transformed_image.shape) == 4\r\n and transformed_image.shape[1] == 3\r\n and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size\r\n ), f\"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}.\"\r\n self.reset_image()\r\n\r\n self.original_size = original_image_size\r\n self.input_size = tuple(transformed_image.shape[-2:])\r\n input_image = self.model.preprocess(transformed_image)\r\n self.features, self.interm_features = self.model.image_encoder(input_image)\r\n self.is_image_set = True\r\n\r\n def predict(\r\n self,\r\n point_coords: Optional[np.ndarray] = None,\r\n point_labels: Optional[np.ndarray] = None,\r\n box: Optional[np.ndarray] = None,\r\n mask_input: Optional[np.ndarray] = None,\r\n multimask_output: bool = True,\r\n return_logits: bool = False,\r\n hq_token_only: bool =False,\r\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\r\n \"\"\"\r\n Predict masks for the given input prompts, using the currently set image.\r\n\r\n Arguments:\r\n point_coords (np.ndarray or None): A Nx2 array of point prompts to the\r\n model. Each point is in (X,Y) in pixels.\r\n point_labels (np.ndarray or None): A length N array of labels for the\r\n point prompts. 1 indicates a foreground point and 0 indicates a\r\n background point.\r\n box (np.ndarray or None): A length 4 array given a box prompt to the\r\n model, in XYXY format.\r\n mask_input (np.ndarray): A low resolution mask input to the model, typically\r\n coming from a previous prediction iteration. Has form 1xHxW, where\r\n for SAM, H=W=256.\r\n multimask_output (bool): If true, the model will return three masks.\r\n For ambiguous input prompts (such as a single click), this will often\r\n produce better masks than a single prediction. If only a single\r\n mask is needed, the model's predicted quality score can be used\r\n to select the best mask. For non-ambiguous prompts, such as multiple\r\n input prompts, multimask_output=False can give better results.\r\n return_logits (bool): If true, returns un-thresholded masks logits\r\n instead of a binary mask.\r\n\r\n Returns:\r\n (np.ndarray): The output masks in CxHxW format, where C is the\r\n number of masks, and (H, W) is the original image size.\r\n (np.ndarray): An array of length C containing the model's\r\n predictions for the quality of each mask.\r\n (np.ndarray): An array of shape CxHxW, where C is the number\r\n of masks and H=W=256. These low resolution logits can be passed to\r\n a subsequent iteration as mask input.\r\n \"\"\"\r\n if not self.is_image_set:\r\n raise RuntimeError(\"An image must be set with .set_image(...) 
before mask prediction.\")\r\n\r\n # Transform input prompts\r\n coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None\r\n if point_coords is not None:\r\n assert (\r\n point_labels is not None\r\n ), \"point_labels must be supplied if point_coords is supplied.\"\r\n point_coords = self.transform.apply_coords(point_coords, self.original_size)\r\n coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device)\r\n labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device)\r\n coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]\r\n if box is not None:\r\n box = self.transform.apply_boxes(box, self.original_size)\r\n box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)\r\n box_torch = box_torch[None, :]\r\n if mask_input is not None:\r\n mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device)\r\n mask_input_torch = mask_input_torch[None, :, :, :]\r\n\r\n masks, iou_predictions, low_res_masks = self.predict_torch(\r\n coords_torch,\r\n labels_torch,\r\n box_torch,\r\n mask_input_torch,\r\n multimask_output,\r\n return_logits=return_logits,\r\n hq_token_only=hq_token_only,\r\n )\r\n\r\n masks_np = masks[0].detach().cpu().numpy()\r\n iou_predictions_np = iou_predictions[0].detach().cpu().numpy()\r\n low_res_masks_np = low_res_masks[0].detach().cpu().numpy()\r\n return masks_np, iou_predictions_np, low_res_masks_np\r\n\r\n @torch.no_grad()\r\n def predict_torch(\r\n self,\r\n point_coords: Optional[torch.Tensor],\r\n point_labels: Optional[torch.Tensor],\r\n boxes: Optional[torch.Tensor] = None,\r\n mask_input: Optional[torch.Tensor] = None,\r\n multimask_output: bool = True,\r\n return_logits: bool = False,\r\n hq_token_only: bool =False,\r\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\r\n \"\"\"\r\n Predict masks for the given input prompts, using the currently set image.\r\n Input prompts are batched torch tensors and are expected to already be\r\n transformed to the input frame using ResizeLongestSide.\r\n\r\n Arguments:\r\n point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the\r\n model. Each point is in (X,Y) in pixels.\r\n point_labels (torch.Tensor or None): A BxN array of labels for the\r\n point prompts. 1 indicates a foreground point and 0 indicates a\r\n background point.\r\n boxes (np.ndarray or None): A Bx4 array given a box prompt to the\r\n model, in XYXY format.\r\n mask_input (np.ndarray): A low resolution mask input to the model, typically\r\n coming from a previous prediction iteration. Has form Bx1xHxW, where\r\n for SAM, H=W=256. Masks returned by a previous iteration of the\r\n predict method do not need further transformation.\r\n multimask_output (bool): If true, the model will return three masks.\r\n For ambiguous input prompts (such as a single click), this will often\r\n produce better masks than a single prediction. If only a single\r\n mask is needed, the model's predicted quality score can be used\r\n to select the best mask. 
For non-ambiguous prompts, such as multiple\r\n input prompts, multimask_output=False can give better results.\r\n return_logits (bool): If true, returns un-thresholded masks logits\r\n instead of a binary mask.\r\n\r\n Returns:\r\n (torch.Tensor): The output masks in BxCxHxW format, where C is the\r\n number of masks, and (H, W) is the original image size.\r\n (torch.Tensor): An array of shape BxC containing the model's\r\n predictions for the quality of each mask.\r\n (torch.Tensor): An array of shape BxCxHxW, where C is the number\r\n of masks and H=W=256. These low res logits can be passed to\r\n a subsequent iteration as mask input.\r\n \"\"\"\r\n if not self.is_image_set:\r\n raise RuntimeError(\"An image must be set with .set_image(...) before mask prediction.\")\r\n\r\n if point_coords is not None:\r\n points = (point_coords, point_labels)\r\n else:\r\n points = None\r\n\r\n # Embed prompts\r\n sparse_embeddings, dense_embeddings = self.model.prompt_encoder(\r\n points=points,\r\n boxes=boxes,\r\n masks=mask_input,\r\n )\r\n\r\n # Predict masks\r\n low_res_masks, iou_predictions = self.model.mask_decoder(\r\n image_embeddings=self.features,\r\n image_pe=self.model.prompt_encoder.get_dense_pe(),\r\n sparse_prompt_embeddings=sparse_embeddings,\r\n dense_prompt_embeddings=dense_embeddings,\r\n multimask_output=multimask_output,\r\n hq_token_only=hq_token_only,\r\n interm_embeddings=self.interm_features,\r\n )\r\n\r\n # Upscale the masks to the original image resolution\r\n masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size)\r\n\r\n if not return_logits:\r\n masks = masks > self.model.mask_threshold\r\n\r\n return masks, iou_predictions, low_res_masks\r\n\r\n def get_image_embedding(self) -> torch.Tensor:\r\n \"\"\"\r\n Returns the image embeddings for the currently set image, with\r\n shape 1xCxHxW, where C is the embedding dimension and (H,W) are\r\n the embedding spatial dimension of SAM (typically C=256, H=W=64).\r\n \"\"\"\r\n if not self.is_image_set:\r\n raise RuntimeError(\r\n \"An image must be set with .set_image(...) 
to generate an embedding.\"\r\n )\r\n assert self.features is not None, \"Features must exist if an image has been set.\"\r\n return self.features\r\n\r\n @property\r\n def device(self) -> torch.device:\r\n return self.model.device\r\n\r\n def reset_image(self) -> None:\r\n \"\"\"Resets the currently set image.\"\"\"\r\n self.is_image_set = False\r\n self.features = None\r\n self.orig_h = None\r\n self.orig_w = None\r\n self.input_h = None\r\n self.input_w = None\r" }, { "identifier": "MaskData", "path": "sam/segment_anything/utils/amg.py", "snippet": "class MaskData:\r\n \"\"\"\r\n A structure for storing masks and their related data in batched format.\r\n Implements basic filtering and concatenation.\r\n \"\"\"\r\n\r\n def __init__(self, **kwargs) -> None:\r\n for v in kwargs.values():\r\n assert isinstance(\r\n v, (list, np.ndarray, torch.Tensor)\r\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\r\n self._stats = dict(**kwargs)\r\n\r\n def __setitem__(self, key: str, item: Any) -> None:\r\n assert isinstance(\r\n item, (list, np.ndarray, torch.Tensor)\r\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\r\n self._stats[key] = item\r\n\r\n def __delitem__(self, key: str) -> None:\r\n del self._stats[key]\r\n\r\n def __getitem__(self, key: str) -> Any:\r\n return self._stats[key]\r\n\r\n def items(self) -> ItemsView[str, Any]:\r\n return self._stats.items()\r\n\r\n def filter(self, keep: torch.Tensor) -> None:\r\n for k, v in self._stats.items():\r\n if v is None:\r\n self._stats[k] = None\r\n elif isinstance(v, torch.Tensor):\r\n self._stats[k] = v[torch.as_tensor(keep, device=v.device)]\r\n elif isinstance(v, np.ndarray):\r\n self._stats[k] = v[keep.detach().cpu().numpy()]\r\n elif isinstance(v, list) and keep.dtype == torch.bool:\r\n self._stats[k] = [a for i, a in enumerate(v) if keep[i]]\r\n elif isinstance(v, list):\r\n self._stats[k] = [v[i] for i in keep]\r\n else:\r\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\r\n\r\n def cat(self, new_stats: \"MaskData\") -> None:\r\n for k, v in new_stats.items():\r\n if k not in self._stats or self._stats[k] is None:\r\n self._stats[k] = deepcopy(v)\r\n elif isinstance(v, torch.Tensor):\r\n self._stats[k] = torch.cat([self._stats[k], v], dim=0)\r\n elif isinstance(v, np.ndarray):\r\n self._stats[k] = np.concatenate([self._stats[k], v], axis=0)\r\n elif isinstance(v, list):\r\n self._stats[k] = self._stats[k] + deepcopy(v)\r\n else:\r\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\r\n\r\n def to_numpy(self) -> None:\r\n for k, v in self._stats.items():\r\n if isinstance(v, torch.Tensor):\r\n self._stats[k] = v.detach().cpu().numpy()\r" }, { "identifier": "area_from_rle", "path": "sam/segment_anything/utils/amg.py", "snippet": "def area_from_rle(rle: Dict[str, Any]) -> int:\r\n return sum(rle[\"counts\"][1::2])\r" }, { "identifier": "batch_iterator", "path": "sam/segment_anything/utils/amg.py", "snippet": "def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:\r\n assert len(args) > 0 and all(\r\n len(a) == len(args[0]) for a in args\r\n ), \"Batched iteration must have inputs of all the same size.\"\r\n n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)\r\n for b in range(n_batches):\r\n yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]\r" }, { "identifier": "batched_mask_to_box", "path": "sam/segment_anything/utils/amg.py", "snippet": "def batched_mask_to_box(masks: 
torch.Tensor) -> torch.Tensor:\r\n \"\"\"\r\n Calculates boxes in XYXY format around masks. Return [0,0,0,0] for\r\n an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.\r\n \"\"\"\r\n # torch.max below raises an error on empty inputs, just skip in this case\r\n if torch.numel(masks) == 0:\r\n return torch.zeros(*masks.shape[:-2], 4, device=masks.device)\r\n\r\n # Normalize shape to CxHxW\r\n shape = masks.shape\r\n h, w = shape[-2:]\r\n if len(shape) > 2:\r\n masks = masks.flatten(0, -3)\r\n else:\r\n masks = masks.unsqueeze(0)\r\n\r\n # Get top and bottom edges\r\n in_height, _ = torch.max(masks, dim=-1)\r\n in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]\r\n bottom_edges, _ = torch.max(in_height_coords, dim=-1)\r\n in_height_coords = in_height_coords + h * (~in_height)\r\n top_edges, _ = torch.min(in_height_coords, dim=-1)\r\n\r\n # Get left and right edges\r\n in_width, _ = torch.max(masks, dim=-2)\r\n in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]\r\n right_edges, _ = torch.max(in_width_coords, dim=-1)\r\n in_width_coords = in_width_coords + w * (~in_width)\r\n left_edges, _ = torch.min(in_width_coords, dim=-1)\r\n\r\n # If the mask is empty the right edge will be to the left of the left edge.\r\n # Replace these boxes with [0, 0, 0, 0]\r\n empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)\r\n out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)\r\n out = out * (~empty_filter).unsqueeze(-1)\r\n\r\n # Return to original shape\r\n if len(shape) > 2:\r\n out = out.reshape(*shape[:-2], 4)\r\n else:\r\n out = out[0]\r\n\r\n return out\r" }, { "identifier": "box_xyxy_to_xywh", "path": "sam/segment_anything/utils/amg.py", "snippet": "def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:\r\n box_xywh = deepcopy(box_xyxy)\r\n box_xywh[2] = box_xywh[2] - box_xywh[0]\r\n box_xywh[3] = box_xywh[3] - box_xywh[1]\r\n return box_xywh\r" }, { "identifier": "build_all_layer_point_grids", "path": "sam/segment_anything/utils/amg.py", "snippet": "def build_all_layer_point_grids(\r\n n_per_side: int, n_layers: int, scale_per_layer: int\r\n) -> List[np.ndarray]:\r\n \"\"\"Generates point grids for all crop layers.\"\"\"\r\n points_by_layer = []\r\n for i in range(n_layers + 1):\r\n n_points = int(n_per_side / (scale_per_layer**i))\r\n points_by_layer.append(build_point_grid(n_points))\r\n return points_by_layer\r" }, { "identifier": "calculate_stability_score", "path": "sam/segment_anything/utils/amg.py", "snippet": "def calculate_stability_score(\r\n masks: torch.Tensor, mask_threshold: float, threshold_offset: float\r\n) -> torch.Tensor:\r\n \"\"\"\r\n Computes the stability score for a batch of masks. 
The stability\r\n score is the IoU between the binary masks obtained by thresholding\r\n the predicted mask logits at high and low values.\r\n \"\"\"\r\n # One mask is always contained inside the other.\r\n # Save memory by preventing unnecessary cast to torch.int64\r\n intersections = (\r\n (masks > (mask_threshold + threshold_offset))\r\n .sum(-1, dtype=torch.int16)\r\n .sum(-1, dtype=torch.int32)\r\n )\r\n unions = (\r\n (masks > (mask_threshold - threshold_offset))\r\n .sum(-1, dtype=torch.int16)\r\n .sum(-1, dtype=torch.int32)\r\n )\r\n return intersections / unions\r" }, { "identifier": "coco_encode_rle", "path": "sam/segment_anything/utils/amg.py", "snippet": "def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:\r\n from pycocotools import mask as mask_utils # type: ignore\r\n\r\n h, w = uncompressed_rle[\"size\"]\r\n rle = mask_utils.frPyObjects(uncompressed_rle, h, w)\r\n rle[\"counts\"] = rle[\"counts\"].decode(\"utf-8\") # Necessary to serialize with json\r\n return rle\r" }, { "identifier": "generate_crop_boxes", "path": "sam/segment_anything/utils/amg.py", "snippet": "def generate_crop_boxes(\r\n im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float\r\n) -> Tuple[List[List[int]], List[int]]:\r\n \"\"\"\r\n Generates a list of crop boxes of different sizes. Each layer\r\n has (2**i)**2 boxes for the ith layer.\r\n \"\"\"\r\n crop_boxes, layer_idxs = [], []\r\n im_h, im_w = im_size\r\n short_side = min(im_h, im_w)\r\n\r\n # Original image\r\n crop_boxes.append([0, 0, im_w, im_h])\r\n layer_idxs.append(0)\r\n\r\n def crop_len(orig_len, n_crops, overlap):\r\n return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))\r\n\r\n for i_layer in range(n_layers):\r\n n_crops_per_side = 2 ** (i_layer + 1)\r\n overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))\r\n\r\n crop_w = crop_len(im_w, n_crops_per_side, overlap)\r\n crop_h = crop_len(im_h, n_crops_per_side, overlap)\r\n\r\n crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]\r\n crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]\r\n\r\n # Crops in XYWH format\r\n for x0, y0 in product(crop_box_x0, crop_box_y0):\r\n box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]\r\n crop_boxes.append(box)\r\n layer_idxs.append(i_layer + 1)\r\n\r\n return crop_boxes, layer_idxs\r" }, { "identifier": "is_box_near_crop_edge", "path": "sam/segment_anything/utils/amg.py", "snippet": "def is_box_near_crop_edge(\r\n boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0\r\n) -> torch.Tensor:\r\n \"\"\"Filter masks at the edge of a crop, but not at the edge of the original image.\"\"\"\r\n crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)\r\n orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)\r\n boxes = uncrop_boxes_xyxy(boxes, crop_box).float()\r\n near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)\r\n near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)\r\n near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)\r\n return torch.any(near_crop_edge, dim=1)\r" }, { "identifier": "mask_to_rle_pytorch", "path": "sam/segment_anything/utils/amg.py", "snippet": "def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:\r\n \"\"\"\r\n Encodes masks to an uncompressed RLE, in the format expected by\r\n pycoco tools.\r\n \"\"\"\r\n # Put in fortran order and flatten 
h,w\r\n b, h, w = tensor.shape\r\n tensor = tensor.permute(0, 2, 1).flatten(1)\r\n\r\n # Compute change indices\r\n diff = tensor[:, 1:] ^ tensor[:, :-1]\r\n change_indices = diff.nonzero()\r\n\r\n # Encode run length\r\n out = []\r\n for i in range(b):\r\n cur_idxs = change_indices[change_indices[:, 0] == i, 1]\r\n cur_idxs = torch.cat(\r\n [\r\n torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),\r\n cur_idxs + 1,\r\n torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),\r\n ]\r\n )\r\n btw_idxs = cur_idxs[1:] - cur_idxs[:-1]\r\n counts = [] if tensor[i, 0] == 0 else [0]\r\n counts.extend(btw_idxs.detach().cpu().tolist())\r\n out.append({\"size\": [h, w], \"counts\": counts})\r\n return out\r" }, { "identifier": "remove_small_regions", "path": "sam/segment_anything/utils/amg.py", "snippet": "def remove_small_regions(\r\n mask: np.ndarray, area_thresh: float, mode: str\r\n) -> Tuple[np.ndarray, bool]:\r\n \"\"\"\r\n Removes small disconnected regions and holes in a mask. Returns the\r\n mask and an indicator of if the mask has been modified.\r\n \"\"\"\r\n import cv2 # type: ignore\r\n\r\n assert mode in [\"holes\", \"islands\"]\r\n correct_holes = mode == \"holes\"\r\n working_mask = (correct_holes ^ mask).astype(np.uint8)\r\n n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)\r\n sizes = stats[:, -1][1:] # Row 0 is background label\r\n small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]\r\n if len(small_regions) == 0:\r\n return mask, False\r\n fill_labels = [0] + small_regions\r\n if not correct_holes:\r\n fill_labels = [i for i in range(n_labels) if i not in fill_labels]\r\n # If every region is below threshold, keep largest\r\n if len(fill_labels) == 0:\r\n fill_labels = [int(np.argmax(sizes)) + 1]\r\n mask = np.isin(regions, fill_labels)\r\n return mask, True\r" }, { "identifier": "rle_to_mask", "path": "sam/segment_anything/utils/amg.py", "snippet": "def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:\r\n \"\"\"Compute a binary mask from an uncompressed RLE.\"\"\"\r\n h, w = rle[\"size\"]\r\n mask = np.empty(h * w, dtype=bool)\r\n idx = 0\r\n parity = False\r\n for count in rle[\"counts\"]:\r\n mask[idx : idx + count] = parity\r\n idx += count\r\n parity ^= True\r\n mask = mask.reshape(w, h)\r\n return mask.transpose() # Put in C order\r" }, { "identifier": "uncrop_boxes_xyxy", "path": "sam/segment_anything/utils/amg.py", "snippet": "def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\r\n x0, y0, _, _ = crop_box\r\n offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)\r\n # Check if boxes has a channel dimension\r\n if len(boxes.shape) == 3:\r\n offset = offset.unsqueeze(1)\r\n return boxes + offset\r" }, { "identifier": "uncrop_masks", "path": "sam/segment_anything/utils/amg.py", "snippet": "def uncrop_masks(\r\n masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int\r\n) -> torch.Tensor:\r\n x0, y0, x1, y1 = crop_box\r\n if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:\r\n return masks\r\n # Coordinate transform masks\r\n pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)\r\n pad = (x0, pad_x - x0, y0, pad_y - y0)\r\n return torch.nn.functional.pad(masks, pad, value=0)\r" }, { "identifier": "uncrop_points", "path": "sam/segment_anything/utils/amg.py", "snippet": "def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\r\n x0, y0, _, _ = crop_box\r\n offset = torch.tensor([[x0, y0]], device=points.device)\r\n # Check if 
points has a channel dimension\r\n if len(points.shape) == 3:\r\n offset = offset.unsqueeze(1)\r\n return points + offset\r" } ]
import numpy as np import torch import cv2 # type: ignore # noqa: F401 from torchvision.ops.boxes import batched_nms, box_area # type: ignore from typing import Any, Dict, List, Optional, Tuple from .modeling import Sam from .predictor import SamPredictor from .utils.amg import ( MaskData, area_from_rle, batch_iterator, batched_mask_to_box, box_xyxy_to_xywh, build_all_layer_point_grids, calculate_stability_score, coco_encode_rle, generate_crop_boxes, is_box_near_crop_edge, mask_to_rle_pytorch, remove_small_regions, rle_to_mask, uncrop_boxes_xyxy, uncrop_masks, uncrop_points, ) from pycocotools import mask as mask_utils # type: ignore # noqa: F401
11251
"crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), } curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray, multimask_output: bool = True) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes( orig_size, self.crop_n_layers, self.crop_overlap_ratio ) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size, multimask_output) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data["crop_boxes"]) scores = scores.to(data["boxes"].device) keep_by_nms = batched_nms( data["boxes"].float(), scores, torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], multimask_output: bool = True, ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size, multimask_output) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) data["points"] = uncrop_points(data["points"], crop_box) data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], multimask_output: bool = True, ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=multimask_output, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data["iou_preds"] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score data["stability_score"] = calculate_stability_score( data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset ) if self.stability_score_thresh > 0.0: keep_mask = data["stability_score"] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data["masks"] = data["masks"] > self.predictor.model.mask_threshold
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. class SamAutomaticMaskGenerator: def __init__( self, model: Sam, points_per_side: Optional[int] = 32, points_per_batch: int = 64, pred_iou_thresh: float = 0.88, stability_score_thresh: float = 0.95, stability_score_offset: float = 1.0, box_nms_thresh: float = 0.7, crop_n_layers: int = 0, crop_nms_thresh: float = 0.7, crop_overlap_ratio: float = 512 / 1500, crop_n_points_downscale_factor: int = 1, point_grids: Optional[List[np.ndarray]] = None, min_mask_region_area: int = 0, output_mode: str = "binary_mask", ) -> None: """ Using a SAM model, generates masks for the entire image. Generates a grid of point prompts over the image, then filters low quality and duplicate masks. The default settings are chosen for SAM with a ViT-H backbone. Arguments: model (Sam): The SAM model to use for mask prediction. points_per_side (int or None): The number of points to be sampled along one side of the image. The total number of points is points_per_side**2. If None, 'point_grids' must provide explicit point sampling. points_per_batch (int): Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU memory. pred_iou_thresh (float): A filtering threshold in [0,1], using the model's predicted mask quality. stability_score_thresh (float): A filtering threshold in [0,1], using the stability of the mask under changes to the cutoff used to binarize the model's mask predictions. stability_score_offset (float): The amount to shift the cutoff when calculated the stability score. box_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks. crop_n_layers (int): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. crop_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks between different crops. crop_overlap_ratio (float): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. crop_n_points_downscale_factor (int): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. point_grids (list(np.ndarray) or None): A list over explicit grids of points used for sampling, normalized to [0,1]. The nth grid in the list is used in the nth crop layer. Exclusive with points_per_side. min_mask_region_area (int): If >0, postprocessing will be applied to remove disconnected regions and holes in masks with area smaller than min_mask_region_area. Requires opencv. output_mode (str): The form masks are returned in. Can be 'binary_mask', 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. For large resolutions, 'binary_mask' may consume large amounts of memory. """ assert (points_per_side is None) != ( point_grids is None ), "Exactly one of points_per_side or point_grid must be provided." 
if points_per_side is not None: self.point_grids = build_all_layer_point_grids( points_per_side, crop_n_layers, crop_n_points_downscale_factor, ) elif point_grids is not None: self.point_grids = point_grids else: raise ValueError("Can't have both points_per_side and point_grid be None.") assert output_mode in [ "binary_mask", "uncompressed_rle", "coco_rle", ], f"Unknown output_mode {output_mode}." if output_mode == "coco_rle": if min_mask_region_area > 0: self.predictor = SamPredictor(model) self.points_per_batch = points_per_batch self.pred_iou_thresh = pred_iou_thresh self.stability_score_thresh = stability_score_thresh self.stability_score_offset = stability_score_offset self.box_nms_thresh = box_nms_thresh self.crop_n_layers = crop_n_layers self.crop_nms_thresh = crop_nms_thresh self.crop_overlap_ratio = crop_overlap_ratio self.crop_n_points_downscale_factor = crop_n_points_downscale_factor self.min_mask_region_area = min_mask_region_area self.output_mode = output_mode @torch.no_grad() def generate(self, image: np.ndarray, multimask_output: bool = True) -> List[Dict[str, Any]]: """ Generates masks for the given image. Arguments: image (np.ndarray): The image to generate masks for, in HWC uint8 format. Returns: list(dict(str, any)): A list over records for masks. Each record is a dict containing the following keys: segmentation (dict(str, any) or np.ndarray): The mask. If output_mode='binary_mask', is an array of shape HW. Otherwise, is a dictionary containing the RLE. bbox (list(float)): The box around the mask, in XYWH format. area (int): The area in pixels of the mask. predicted_iou (float): The model's own prediction of the mask's quality. This is filtered by the pred_iou_thresh parameter. point_coords (list(list(float))): The point coordinates input to the model to generate this mask. stability_score (float): A measure of the mask's quality. This is filtered on using the stability_score_thresh parameter. crop_box (list(float)): The crop of the image used to generate the mask, given in XYWH format. 
""" # Generate masks mask_data = self._generate_masks(image, multimask_output) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: mask_data = self.postprocess_small_regions( mask_data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == "coco_rle": mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]] elif self.output_mode == "binary_mask": mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]] else: mask_data["segmentations"] = mask_data["rles"] # Write mask records curr_anns = [] for idx in range(len(mask_data["segmentations"])): ann = { "segmentation": mask_data["segmentations"][idx], "area": area_from_rle(mask_data["rles"][idx]), "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), "predicted_iou": mask_data["iou_preds"][idx].item(), "point_coords": [mask_data["points"][idx].tolist()], "stability_score": mask_data["stability_score"][idx].item(), "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), } curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray, multimask_output: bool = True) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes( orig_size, self.crop_n_layers, self.crop_overlap_ratio ) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size, multimask_output) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data["crop_boxes"]) scores = scores.to(data["boxes"].device) keep_by_nms = batched_nms( data["boxes"].float(), scores, torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], multimask_output: bool = True, ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size, multimask_output) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. 
keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) data["points"] = uncrop_points(data["points"], crop_box) data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], multimask_output: bool = True, ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=multimask_output, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data["iou_preds"] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score data["stability_score"] = calculate_stability_score( data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset ) if self.stability_score_thresh > 0.0: keep_mask = data["stability_score"] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data["masks"] = data["masks"] > self.predictor.model.mask_threshold
data["boxes"] = batched_mask_to_box(data["masks"])
5
2023-10-14 13:45:54+00:00
16k
zhaoyizhou1123/mbrcsl
examples/roboverse/run_mbcql_roboverse.py
[ { "identifier": "MLP", "path": "offlinerlkit/nets/mlp.py", "snippet": "class MLP(nn.Module):\n def __init__(\n self,\n input_dim: int,\n hidden_dims: Union[List[int], Tuple[int]],\n output_dim: Optional[int] = None,\n activation: nn.Module = nn.ReLU,\n dropout_rate: Optional[float] = None,\n init_last: bool = False\n ) -> None:\n super().__init__()\n hidden_dims = [input_dim] + list(hidden_dims)\n model = []\n for in_dim, out_dim in zip(hidden_dims[:-1], hidden_dims[1:]):\n model += [nn.Linear(in_dim, out_dim), activation()]\n if dropout_rate is not None:\n model += [nn.Dropout(p=dropout_rate)]\n\n self.output_dim = hidden_dims[-1]\n if output_dim is not None:\n last_layer = nn.Linear(hidden_dims[-1], output_dim)\n if init_last:\n nn.init.xavier_uniform_(last_layer.weight, gain=1e-2)\n nn.init.constant_(last_layer.bias, 0.0)\n model += [last_layer]\n self.output_dim = output_dim\n self.model = nn.Sequential(*model)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n return self.model(x)" }, { "identifier": "ActorProb", "path": "offlinerlkit/modules/actor_module.py", "snippet": "class ActorProb(nn.Module):\n def __init__(\n self,\n backbone: nn.Module,\n dist_net: nn.Module,\n device: str = \"cpu\"\n ) -> None:\n super().__init__()\n\n self.device = torch.device(device)\n self.backbone = backbone.to(device)\n self.dist_net = dist_net.to(device)\n\n def forward(self, obs: Union[np.ndarray, torch.Tensor]) -> torch.distributions.Normal:\n obs = torch.as_tensor(obs, device=self.device, dtype=torch.float32)\n logits = self.backbone(obs)\n dist = self.dist_net(logits)\n return dist" }, { "identifier": "Critic", "path": "offlinerlkit/modules/critic_module.py", "snippet": "class Critic(nn.Module):\n def __init__(self, backbone: nn.Module, device: str = \"cpu\") -> None:\n super().__init__()\n\n self.device = torch.device(device)\n self.backbone = backbone.to(device)\n latent_dim = getattr(backbone, \"output_dim\")\n self.last = nn.Linear(latent_dim, 1).to(device)\n\n def forward(\n self,\n obs: Union[np.ndarray, torch.Tensor],\n actions: Optional[Union[np.ndarray, torch.Tensor]] = None\n ) -> torch.Tensor:\n obs = torch.as_tensor(obs, device=self.device, dtype=torch.float32)\n if actions is not None:\n actions = torch.as_tensor(actions, device=self.device, dtype=torch.float32).flatten(1)\n obs = torch.cat([obs, actions], dim=1)\n logits = self.backbone(obs)\n values = self.last(logits)\n return values" }, { "identifier": "TanhDiagGaussian", "path": "offlinerlkit/modules/dist_module.py", "snippet": "class TanhDiagGaussian(DiagGaussian):\n def __init__(\n self,\n latent_dim,\n output_dim,\n unbounded=False,\n conditioned_sigma=False,\n max_mu=1.0,\n sigma_min=-5.0,\n sigma_max=2.0\n ):\n super().__init__(\n latent_dim=latent_dim,\n output_dim=output_dim,\n unbounded=unbounded,\n conditioned_sigma=conditioned_sigma,\n max_mu=max_mu,\n sigma_min=sigma_min,\n sigma_max=sigma_max\n )\n\n def forward(self, logits):\n mu = self.mu(logits)\n if not self._unbounded:\n mu = self._max * torch.tanh(mu)\n if self._c_sigma:\n sigma = torch.clamp(self.sigma(logits), min=self._sigma_min, max=self._sigma_max).exp()\n else:\n shape = [1] * len(mu.shape)\n shape[1] = -1\n sigma = (self.sigma_param.view(shape) + torch.zeros_like(mu)).exp()\n return TanhNormalWrapper(mu, sigma)" }, { "identifier": "ReplayBuffer", "path": "offlinerlkit/buffer/buffer.py", "snippet": "class ReplayBuffer:\n def __init__(\n self,\n buffer_size: int,\n obs_shape: Tuple,\n obs_dtype: np.dtype,\n action_dim: int,\n action_dtype: 
np.dtype,\n device: str = \"cpu\"\n ) -> None:\n self._max_size = buffer_size\n self.obs_shape = obs_shape\n self.obs_dtype = obs_dtype\n self.action_dim = action_dim\n self.action_dtype = action_dtype\n\n self._ptr = 0\n self._size = 0\n\n self.observations = np.zeros((self._max_size,) + self.obs_shape, dtype=obs_dtype)\n self.next_observations = np.zeros((self._max_size,) + self.obs_shape, dtype=obs_dtype)\n self.actions = np.zeros((self._max_size, self.action_dim), dtype=action_dtype)\n self.rewards = np.zeros((self._max_size, 1), dtype=np.float32)\n self.terminals = np.zeros((self._max_size, 1), dtype=np.float32)\n\n self.device = torch.device(device)\n\n def add(\n self,\n obs: np.ndarray,\n next_obs: np.ndarray,\n action: np.ndarray,\n reward: np.ndarray,\n terminal: np.ndarray\n ) -> None:\n # Copy to avoid modification by reference\n self.observations[self._ptr] = np.array(obs).copy()\n self.next_observations[self._ptr] = np.array(next_obs).copy()\n self.actions[self._ptr] = np.array(action).copy()\n self.rewards[self._ptr] = np.array(reward).copy()\n self.terminals[self._ptr] = np.array(terminal).copy()\n\n self._ptr = (self._ptr + 1) % self._max_size\n self._size = min(self._size + 1, self._max_size)\n \n def add_batch(\n self,\n obss: np.ndarray,\n next_obss: np.ndarray,\n actions: np.ndarray,\n rewards: np.ndarray,\n terminals: np.ndarray\n ) -> None:\n batch_size = len(obss)\n indexes = np.arange(self._ptr, self._ptr + batch_size) % self._max_size\n\n self.observations[indexes] = np.array(obss).copy()\n self.next_observations[indexes] = np.array(next_obss).copy()\n self.actions[indexes] = np.array(actions).copy()\n self.rewards[indexes] = np.array(rewards).copy()\n self.terminals[indexes] = np.array(terminals).copy()\n\n self._ptr = (self._ptr + batch_size) % self._max_size\n self._size = min(self._size + batch_size, self._max_size)\n \n def load_dataset(self, dataset: Dict[str, np.ndarray]) -> None:\n observations = np.array(dataset[\"observations\"], dtype=self.obs_dtype)\n next_observations = np.array(dataset[\"next_observations\"], dtype=self.obs_dtype)\n actions = np.array(dataset[\"actions\"], dtype=self.action_dtype)\n rewards = np.array(dataset[\"rewards\"], dtype=np.float32).reshape(-1, 1)\n terminals = np.array(dataset[\"terminals\"], dtype=np.float32).reshape(-1, 1)\n\n self.observations = observations\n self.next_observations = next_observations\n self.actions = actions\n self.rewards = rewards\n self.terminals = terminals\n\n self._ptr = len(observations)\n self._size = len(observations)\n \n def normalize_obs(self, eps: float = 1e-3) -> Tuple[np.ndarray, np.ndarray]:\n mean = self.observations.mean(0, keepdims=True)\n std = self.observations.std(0, keepdims=True) + eps\n self.observations = (self.observations - mean) / std\n self.next_observations = (self.next_observations - mean) / std\n obs_mean, obs_std = mean, std\n return obs_mean, obs_std\n\n def sample(self, batch_size: int) -> Dict[str, torch.Tensor]:\n\n batch_indexes = np.random.randint(0, self._size, size=batch_size)\n \n return {\n \"observations\": torch.tensor(self.observations[batch_indexes]).to(self.device),\n \"actions\": torch.tensor(self.actions[batch_indexes]).to(self.device),\n \"next_observations\": torch.tensor(self.next_observations[batch_indexes]).to(self.device),\n \"terminals\": torch.tensor(self.terminals[batch_indexes]).to(self.device),\n \"rewards\": torch.tensor(self.rewards[batch_indexes]).to(self.device)\n }\n \n def sample_all(self) -> Dict[str, np.ndarray]:\n return {\n 
\"observations\": self.observations[:self._size].copy(),\n \"actions\": self.actions[:self._size].copy(),\n \"next_observations\": self.next_observations[:self._size].copy(),\n \"terminals\": self.terminals[:self._size].copy(),\n \"rewards\": self.rewards[:self._size].copy()\n }" }, { "identifier": "Logger", "path": "offlinerlkit/utils/logger.py", "snippet": "class Logger(object):\n def __init__(self, dir: str, ouput_config: Dict) -> None:\n self._dir = dir\n self._init_dirs()\n self._init_ouput_handlers(ouput_config)\n self._name2val = defaultdict(float)\n self._name2cnt = defaultdict(int)\n self._level = INFO\n self._timestep = 0\n \n def _init_dirs(self) -> None:\n self._record_dir = os.path.join(self._dir, \"record\")\n self._checkpoint_dir = os.path.join(self._dir, \"checkpoint\")\n self._model_dir = os.path.join(self._dir, \"model\")\n self._result_dir = os.path.join(self._dir, \"result\")\n os.mkdir(self._record_dir)\n os.mkdir(self._checkpoint_dir)\n os.mkdir(self._model_dir)\n os.mkdir(self._result_dir)\n \n def _init_ouput_handlers(self, output_config: Dict) -> None:\n self._output_handlers = []\n for file_name, fmt in output_config.items():\n try:\n self._output_handlers.append(HANDLER[fmt](os.path.join(self._record_dir, file_name)))\n except KeyError:\n warnings.warn(\"Invalid output type, Valid types: stdout, csv, tensorboard\", DeprecationWarning)\n # default output to console\n self._output_handlers.append(StandardOutputHandler(sys.stdout))\n \n def log_hyperparameters(self, hyper_param: Dict) -> None:\n json_output_handler = JSONOutputHandler(os.path.join(self._record_dir, \"hyper_param\"))\n json_output_handler.writekvs(hyper_param)\n json_output_handler.close()\n for handler in self._output_handlers:\n if isinstance(handler, TensorBoardOutputHandler):\n handler.add_hyper_params_to_tb(hyper_param)\n\n def logkv(self, key: Any, val: Any) -> None:\n \"\"\"\n Log a value of some diagnostic\n Call this once for each diagnostic quantity, each iteration\n If called many times, last value will be used.\n \"\"\"\n self._name2val[key] = val\n\n def logkv_mean(self, key: Any, val: Number) -> None:\n \"\"\"\n The same as logkv(), but if called many times, values averaged.\n \"\"\"\n oldval, cnt = self._name2val[key], self._name2cnt[key]\n self._name2val[key] = oldval*cnt/(cnt+1) + val/(cnt+1)\n self._name2cnt[key] = cnt + 1\n\n def dumpkvs(self, exclude:Optional[Union[str, Tuple[str, ...]]]=None) -> None:\n # log timestep\n self.logkv(DEFAULT_X_NAME, self._timestep)\n for handler in self._output_handlers:\n if isinstance(handler, KVWriter):\n if exclude is not None and handler.handler_name in exclude:\n continue\n handler.writekvs(self._name2val)\n self._name2val.clear()\n self._name2cnt.clear()\n\n def log(self, s: str, level=INFO) -> None:\n for handler in self._output_handlers:\n if isinstance(handler, StandardOutputHandler):\n handler.writestr(s)\n \n def set_timestep(self, timestep: int) -> None:\n self._timestep = timestep\n for handler in self._output_handlers:\n if isinstance(handler, TensorBoardOutputHandler):\n handler.set_step(timestep)\n\n def set_level(self, level) -> None:\n self._level = level\n\n @property\n def record_dir(self) -> str:\n return self._record_dir\n \n @property\n def checkpoint_dir(self) -> str:\n return self._checkpoint_dir\n\n @property\n def model_dir(self) -> str:\n return self._model_dir\n \n @property\n def result_dir(self) -> str:\n return self._result_dir\n \n def close(self) -> None:\n for handler in self._output_handlers:\n handler.close()" 
}, { "identifier": "make_log_dirs", "path": "offlinerlkit/utils/logger.py", "snippet": "def make_log_dirs(\n task_name: str,\n algo_name: str,\n exp_name: str,\n args: Dict,\n part: Optional[str] = None,\n record_params: Optional[List]=None\n) -> str:\n if record_params is not None:\n for param_name in record_params:\n algo_name += f\"&{param_name}={args[param_name]}\"\n\n if part is not None:\n log_dirs = os.path.join(ROOT_DIR, task_name, algo_name, exp_name, part)\n else:\n log_dirs = os.path.join(ROOT_DIR, task_name, algo_name, exp_name)\n os.makedirs(log_dirs)\n return log_dirs" }, { "identifier": "MFPolicyTrainer", "path": "offlinerlkit/policy_trainer/mf_policy_trainer.py", "snippet": "class MFPolicyTrainer:\n def __init__(\n self,\n policy: BasePolicy,\n eval_env: gym.Env,\n buffer: ReplayBuffer,\n logger: Logger,\n epoch: int = 1000,\n step_per_epoch: int = 1000,\n batch_size: int = 256,\n eval_episodes: int = 10,\n lr_scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,\n horizon: Optional[int] = None,\n has_terminal = False,\n binary_ret = False,\n last_eval_episodes: Optional[int] = None\n ) -> None:\n '''\n binary_ret: If True, only output 0/1 for return\n '''\n self.policy = policy\n self.eval_env = eval_env\n self.buffer = buffer\n self.logger = logger\n\n self._epoch = epoch\n self._step_per_epoch = step_per_epoch\n self._batch_size = batch_size\n self._eval_episodes = eval_episodes\n self.lr_scheduler = lr_scheduler\n\n self.is_gymnasium_env = hasattr(self.eval_env, \"get_true_observation\")\n assert (not self.is_gymnasium_env) or (self.horizon is not None), \"Horizon must be specified for Gymnasium env\"\n self.has_terminal = has_terminal\n self.horizon = horizon\n self.binary_ret = binary_ret\n\n if last_eval_episodes is None:\n self._last_eval_episodes = eval_episodes\n else:\n self._last_eval_episodes = last_eval_episodes\n\n def train(self, last_eval = False) -> Dict[str, float]:\n start_time = time.time()\n\n num_timesteps = 0\n last_10_performance = deque(maxlen=10)\n # train loop\n for e in range(1, self._epoch + 1):\n\n self.policy.train()\n\n pbar = tqdm(range(self._step_per_epoch), desc=f\"Epoch #{e}/{self._epoch}\")\n for it in pbar:\n batch = self.buffer.sample(self._batch_size)\n loss = self.policy.learn(batch)\n pbar.set_postfix(**loss)\n\n for k, v in loss.items():\n self.logger.logkv_mean(k, v)\n \n num_timesteps += 1\n\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n \n # evaluate current policy\n if last_eval and e < self._epoch: # When last_eval is True, only evaluate on last epoch\n pass\n else:\n if e < self._epoch:\n eval_info = self._evaluate()\n else:\n eval_info = self._evaluate(self._last_eval_episodes)\n ep_reward_mean, ep_reward_std = np.mean(eval_info[\"eval/episode_reward\"]), np.std(eval_info[\"eval/episode_reward\"])\n ep_length_mean, ep_length_std = np.mean(eval_info[\"eval/episode_length\"]), np.std(eval_info[\"eval/episode_length\"])\n if hasattr(self.eval_env, \"get_normalized_score\"):\n norm_ep_rew_mean = self.eval_env.get_normalized_score(ep_reward_mean) * 100\n norm_ep_rew_std = self.eval_env.get_normalized_score(ep_reward_std) * 100\n last_10_performance.append(norm_ep_rew_mean)\n self.logger.logkv(\"eval/normalized_episode_reward\", norm_ep_rew_mean)\n self.logger.logkv(\"eval/normalized_episode_reward_std\", norm_ep_rew_std)\n self.logger.logkv(\"eval/episode_reward\", ep_reward_mean)\n self.logger.logkv(\"eval/episode_reward_std\", ep_reward_std)\n\n self.logger.logkv(\"eval/episode_length\", 
ep_length_mean)\n self.logger.logkv(\"eval/episode_length_std\", ep_length_std)\n self.logger.set_timestep(num_timesteps)\n self.logger.dumpkvs()\n \n # save checkpoint\n torch.save(self.policy.state_dict(), os.path.join(self.logger.checkpoint_dir, \"policy.pth\"))\n\n self.logger.log(\"total time: {:.2f}s\".format(time.time() - start_time))\n torch.save(self.policy.state_dict(), os.path.join(self.logger.model_dir, \"policy.pth\"))\n self.logger.close()\n\n return {\"last_10_performance\": np.mean(last_10_performance)}\n\n def _evaluate(self, episodes: Optional[int] = None) -> Dict[str, List[float]]:\n self.policy.eval()\n obs = self.eval_env.reset()\n eval_ep_info_buffer = []\n num_episodes = 0\n episode_reward, episode_length = 0, 0\n\n if episodes is not None:\n total_episodes = episodes\n else:\n total_episodes = self._eval_episodes\n\n if not self.has_terminal: # Finite horizon, terminal is unimportant\n while num_episodes < total_episodes:\n for timestep in range(self.horizon): # One epoch\n # print(f\"Timestep {timestep}, obs {obs}\")\n action = self.policy.select_action(obs.reshape(1, -1), deterministic=True)\n if hasattr(self.eval_env, \"get_true_observation\"): # gymnasium env \n next_obs, reward, terminal, _, _ = self.eval_env.step(action.flatten())\n else:\n next_obs, reward, terminal, _ = self.eval_env.step(action.flatten())\n if self.is_gymnasium_env:\n next_obs = self.eval_env.get_true_observation(next_obs)\n episode_reward += reward\n episode_length += 1\n\n obs = next_obs\n\n if self.binary_ret:\n episode_reward = 1 if episode_reward >= 1 else 0\n eval_ep_info_buffer.append(\n {\"episode_reward\": episode_reward, \"episode_length\": episode_length}\n )\n num_episodes +=1\n episode_reward, episode_length = 0, 0\n if self.is_gymnasium_env:\n obs, _ = self.eval_env.reset()\n obs = self.eval_env.get_true_observation(obs)\n else:\n obs = self.eval_env.reset()\n else:\n while num_episodes < total_episodes:\n action = self.policy.select_action(obs.reshape(1, -1), deterministic=True)\n if hasattr(self.eval_env, \"get_true_observation\"): # gymnasium env \n next_obs, reward, terminal, _, _ = self.eval_env.step(action.flatten())\n else:\n next_obs, reward, terminal, _ = self.eval_env.step(action.flatten())\n if self.is_gymnasium_env:\n next_obs = self.eval_env.get_true_observation(next_obs)\n episode_reward += reward\n episode_length += 1\n\n obs = next_obs\n\n if terminal: # Episode finishes\n if self.binary_ret:\n episode_reward = 1 if episode_reward >= 1 else 0\n eval_ep_info_buffer.append(\n {\"episode_reward\": episode_reward, \"episode_length\": episode_length}\n )\n num_episodes +=1\n episode_reward, episode_length = 0, 0\n if self.is_gymnasium_env:\n obs, _ = self.eval_env.reset()\n obs = self.eval_env.get_true_observation(obs)\n else:\n obs = self.eval_env.reset()\n \n return {\n \"eval/episode_reward\": [ep_info[\"episode_reward\"] for ep_info in eval_ep_info_buffer],\n \"eval/episode_length\": [ep_info[\"episode_length\"] for ep_info in eval_ep_info_buffer]\n }" }, { "identifier": "CQLPolicy", "path": "offlinerlkit/policy/model_free/cql.py", "snippet": "class CQLPolicy(SACPolicy):\n \"\"\"\n Conservative Q-Learning <Ref: https://arxiv.org/abs/2006.04779>\n \"\"\"\n\n def __init__(\n self,\n actor: nn.Module,\n critic1: nn.Module,\n critic2: nn.Module,\n actor_optim: torch.optim.Optimizer,\n critic1_optim: torch.optim.Optimizer,\n critic2_optim: torch.optim.Optimizer,\n action_space: gym.spaces.Space,\n tau: float = 0.005,\n gamma: float = 0.99,\n alpha: Union[float, 
Tuple[float, torch.Tensor, torch.optim.Optimizer]] = 0.2,\n cql_weight: float = 1.0,\n temperature: float = 1.0,\n max_q_backup: bool = False,\n deterministic_backup: bool = True,\n with_lagrange: bool = True,\n lagrange_threshold: float = 10.0,\n cql_alpha_lr: float = 1e-4,\n num_repeart_actions:int = 10,\n ) -> None:\n super().__init__(\n actor,\n critic1,\n critic2,\n actor_optim,\n critic1_optim,\n critic2_optim,\n tau=tau,\n gamma=gamma,\n alpha=alpha\n )\n\n self.action_space = action_space\n self._cql_weight = cql_weight\n self._temperature = temperature\n self._max_q_backup = max_q_backup\n self._deterministic_backup = deterministic_backup\n self._with_lagrange = with_lagrange\n self._lagrange_threshold = lagrange_threshold\n\n self.cql_log_alpha = torch.zeros(1, requires_grad=True, device=self.actor.device)\n self.cql_alpha_optim = torch.optim.Adam([self.cql_log_alpha], lr=cql_alpha_lr)\n\n self._num_repeat_actions = num_repeart_actions\n\n def calc_pi_values(\n self,\n obs_pi: torch.Tensor,\n obs_to_pred: torch.Tensor\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n act, log_prob = self.actforward(obs_pi)\n\n q1 = self.critic1(obs_to_pred, act)\n q2 = self.critic2(obs_to_pred, act)\n\n return q1 - log_prob.detach(), q2 - log_prob.detach()\n\n def calc_random_values(\n self,\n obs: torch.Tensor,\n random_act: torch.Tensor\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n q1 = self.critic1(obs, random_act)\n q2 = self.critic2(obs, random_act)\n\n log_prob1 = np.log(0.5**random_act.shape[-1])\n log_prob2 = np.log(0.5**random_act.shape[-1])\n\n return q1 - log_prob1, q2 - log_prob2\n\n def learn(self, batch: Dict) -> Dict[str, float]:\n obss, actions, next_obss, rewards, terminals = batch[\"observations\"], batch[\"actions\"], \\\n batch[\"next_observations\"], batch[\"rewards\"], batch[\"terminals\"]\n batch_size = obss.shape[0]\n \n # update actor\n a, log_probs = self.actforward(obss)\n q1a, q2a = self.critic1(obss, a), self.critic2(obss, a)\n actor_loss = (self._alpha * log_probs - torch.min(q1a, q2a)).mean()\n self.actor_optim.zero_grad()\n actor_loss.backward()\n self.actor_optim.step()\n\n if self._is_auto_alpha:\n log_probs = log_probs.detach() + self._target_entropy\n alpha_loss = -(self._log_alpha * log_probs).mean()\n self.alpha_optim.zero_grad()\n alpha_loss.backward()\n self.alpha_optim.step()\n self._alpha = self._log_alpha.detach().exp()\n \n # compute td error\n if self._max_q_backup:\n with torch.no_grad():\n tmp_next_obss = next_obss.unsqueeze(1) \\\n .repeat(1, self._num_repeat_actions, 1) \\\n .view(batch_size * self._num_repeat_actions, next_obss.shape[-1])\n tmp_next_actions, _ = self.actforward(tmp_next_obss)\n tmp_next_q1 = self.critic1_old(tmp_next_obss, tmp_next_actions) \\\n .view(batch_size, self._num_repeat_actions, 1) \\\n .max(1)[0].view(-1, 1)\n tmp_next_q2 = self.critic2_old(tmp_next_obss, tmp_next_actions) \\\n .view(batch_size, self._num_repeat_actions, 1) \\\n .max(1)[0].view(-1, 1)\n next_q = torch.min(tmp_next_q1, tmp_next_q2)\n else:\n with torch.no_grad():\n next_actions, next_log_probs = self.actforward(next_obss)\n next_q = torch.min(\n self.critic1_old(next_obss, next_actions),\n self.critic2_old(next_obss, next_actions)\n )\n if not self._deterministic_backup:\n next_q -= self._alpha * next_log_probs\n\n target_q = rewards + self._gamma * (1 - terminals) * next_q\n q1, q2 = self.critic1(obss, actions), self.critic2(obss, actions)\n critic1_loss = ((q1 - target_q).pow(2)).mean()\n critic2_loss = ((q2 - target_q).pow(2)).mean()\n\n # compute 
conservative loss\n random_actions = torch.FloatTensor(\n batch_size * self._num_repeat_actions, actions.shape[-1]\n ).uniform_(self.action_space.low[0], self.action_space.high[0]).to(self.actor.device)\n # tmp_obss & tmp_next_obss: (batch_size * num_repeat, obs_dim)\n tmp_obss = obss.unsqueeze(1) \\\n .repeat(1, self._num_repeat_actions, 1) \\\n .view(batch_size * self._num_repeat_actions, obss.shape[-1])\n tmp_next_obss = next_obss.unsqueeze(1) \\\n .repeat(1, self._num_repeat_actions, 1) \\\n .view(batch_size * self._num_repeat_actions, obss.shape[-1])\n \n obs_pi_value1, obs_pi_value2 = self.calc_pi_values(tmp_obss, tmp_obss)\n next_obs_pi_value1, next_obs_pi_value2 = self.calc_pi_values(tmp_next_obss, tmp_obss)\n random_value1, random_value2 = self.calc_random_values(tmp_obss, random_actions)\n\n for value in [\n obs_pi_value1, obs_pi_value2, next_obs_pi_value1, next_obs_pi_value2,\n random_value1, random_value2\n ]:\n value.reshape(batch_size, self._num_repeat_actions, 1)\n \n # cat_q shape: (batch_size, 3 * num_repeat, 1)\n cat_q1 = torch.cat([obs_pi_value1, next_obs_pi_value1, random_value1], 1)\n cat_q2 = torch.cat([obs_pi_value2, next_obs_pi_value2, random_value2], 1)\n\n conservative_loss1 = \\\n torch.logsumexp(cat_q1 / self._temperature, dim=1).mean() * self._cql_weight * self._temperature - \\\n q1.mean() * self._cql_weight\n conservative_loss2 = \\\n torch.logsumexp(cat_q2 / self._temperature, dim=1).mean() * self._cql_weight * self._temperature - \\\n q2.mean() * self._cql_weight\n \n if self._with_lagrange:\n cql_alpha = torch.clamp(self.cql_log_alpha.exp(), 0.0, 1e6)\n conservative_loss1 = cql_alpha * (conservative_loss1 - self._lagrange_threshold)\n conservative_loss2 = cql_alpha * (conservative_loss2 - self._lagrange_threshold)\n\n self.cql_alpha_optim.zero_grad()\n cql_alpha_loss = -(conservative_loss1 + conservative_loss2) * 0.5\n cql_alpha_loss.backward(retain_graph=True)\n self.cql_alpha_optim.step()\n \n critic1_loss = critic1_loss + conservative_loss1\n critic2_loss = critic2_loss + conservative_loss2\n\n # update critic\n self.critic1_optim.zero_grad()\n critic1_loss.backward(retain_graph=True)\n self.critic1_optim.step()\n\n self.critic2_optim.zero_grad()\n critic2_loss.backward()\n self.critic2_optim.step()\n\n self._sync_weight()\n\n result = {\n \"loss/actor\": actor_loss.item(),\n \"loss/critic1\": critic1_loss.item(),\n \"loss/critic2\": critic2_loss.item()\n }\n\n if self._is_auto_alpha:\n result[\"loss/alpha\"] = alpha_loss.item()\n result[\"alpha\"] = self._alpha.item()\n if self._with_lagrange:\n result[\"loss/cql_alpha\"] = cql_alpha_loss.item()\n result[\"cql_alpha\"] = cql_alpha.item()\n \n return result" }, { "identifier": "PickPlaceObsWrapper", "path": "offlinerlkit/utils/roboverse_utils.py", "snippet": "class PickPlaceObsWrapper(gym.ObservationWrapper):\n '''\n Wrap pick place environment to return desired obs\n '''\n def __init__(self, env):\n super().__init__(env)\n # Get observation space\n tmp_obs = env.reset()\n\n tmp_true_obs = get_pickplace_obs(tmp_obs)\n low = env.observation_space['state'].low[0]\n high = env.observation_space['state'].high[0]\n self.observation_space = Box(shape = tmp_true_obs.shape, low = low, high = high)\n\n def observation(self, observation: Dict[str, np.ndarray]) -> np.ndarray:\n return get_pickplace_obs(observation)\n\n def reset(self, seed = None):\n if seed is not None:\n np.random.seed(seed) # controls env seed\n return self.observation(self.env.reset())" }, { "identifier": "DoubleDrawerObsWrapper", "path": 
"offlinerlkit/utils/roboverse_utils.py", "snippet": "class DoubleDrawerObsWrapper(gym.Wrapper):\n '''\n Wrap pick place environment to return desired obs\n '''\n def __init__(self, env):\n super().__init__(env)\n # Get observation space\n tmp_obs = env.reset()\n info = env.get_info()\n\n tmp_true_obs = get_doubledrawer_obs(tmp_obs, info)\n low = env.observation_space['state'].low[0]\n high = env.observation_space['state'].high[0]\n self.observation_space = Box(shape = tmp_true_obs.shape, low = low, high = high)\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n obs = get_doubledrawer_obs(obs, info)\n return obs, reward, done, info\n\n def reset(self, seed = None):\n if seed is not None:\n np.random.seed(seed) # controls env seed\n obs = self.env.reset()\n info = self.env.get_info()\n return get_doubledrawer_obs(obs, info)" }, { "identifier": "get_pickplace_dataset", "path": "offlinerlkit/utils/roboverse_utils.py", "snippet": "def get_pickplace_dataset(\n prior_data_path: str, \n task_data_path: str,\n prior_weight: float =1., \n task_weight: float = 1., \n set_type: str = 'full', \n sample_ratio: float = 1.) -> Tuple[Dict, np.ndarray]:\n '''\n Concatenate prior_data and task_data\n prior_weight and task_weight: weight of data point\n\n Args:\n set_type: 'prior', 'task', 'full'\n sample_ratio: Ratio of trajectories sampled. Sometimes we want to train on a smaller dataset.\n\n Return:\n dataset: Dict, additional key 'weights'\n init_obss: np.ndarray (num_traj, obs_dim)\n '''\n with open(prior_data_path, \"rb\") as fp:\n prior_data = np.load(fp, allow_pickle=True)\n with open(task_data_path, \"rb\") as ft:\n task_data = np.load(ft, allow_pickle=True)\n set_weight(prior_data, prior_weight)\n set_weight(task_data, task_weight)\n\n # Sample trajectories\n num_trajs_prior = int(len(prior_data) * sample_ratio)\n idxs_prior = np.random.choice(len(prior_data), size=(num_trajs_prior), replace = False)\n prior_data = prior_data[idxs_prior]\n\n num_trajs_task = int(len(task_data) * sample_ratio)\n idxs_task = np.random.choice(len(task_data), size=(num_trajs_task), replace = False)\n task_data = task_data[idxs_task]\n\n if set_type == 'full':\n full_data = np.concatenate([prior_data, task_data], axis=0) # list of dict\n elif set_type == 'prior':\n full_data = prior_data\n elif set_type =='task':\n full_data = task_data\n keys = ['observations', 'actions', 'rewards', 'next_observations', 'terminals', 'weights']\n\n init_obss = []\n for d in prior_data:\n obs_list = d['observations']\n init_obss.append(get_pickplace_obs(obs_list[0]))\n \n dict_data = {}\n for key in keys:\n values = []\n for d in full_data: # trajectory, dict of lists\n value_list = d[key] # list of timesteps data\n if key == 'observations':\n values += [get_pickplace_obs(obs) for obs in value_list] # element is list\n elif key == 'next_observations':\n values += [get_pickplace_obs(obs) for obs in value_list] # element is list\n else:\n values += value_list # element is list\n values = np.asarray(values)\n dict_data[key] = values\n rtgs = np.zeros_like(dict_data['rewards']) # no return\n dict_data['rtgs'] = rtgs\n\n init_obss = np.asarray(init_obss)\n return dict_data, init_obss" }, { "identifier": "get_doubledrawer_dataset", "path": "offlinerlkit/utils/roboverse_utils.py", "snippet": "def get_doubledrawer_dataset(\n prior_data_path: str, \n task_data_path: str,\n prior_weight: float =1., \n task_weight: float = 1., \n set_type: str = 'full', \n sample_ratio: float = 1.) 
-> Tuple[Dict, np.ndarray]:\n '''\n Concatenate prior_data and task_data\n prior_weight and task_weight: weight of data point\n\n Args:\n set_type: 'prior', 'task', 'full'\n sample_ratio: Ratio of trajectories sampled. Sometimes we want to train on a smaller dataset.\n\n Return:\n dataset: Dict, additional key 'weights'\n init_obss: np.ndarray (num_traj, obs_dim)\n '''\n with open(prior_data_path, \"rb\") as fp:\n prior_data = np.load(fp, allow_pickle=True)\n with open(task_data_path, \"rb\") as ft:\n task_data = np.load(ft, allow_pickle=True)\n set_weight(prior_data, prior_weight)\n set_weight(task_data, task_weight)\n\n # Sample trajectories\n num_trajs_prior = int(len(prior_data) * sample_ratio)\n idxs_prior = np.random.choice(len(prior_data), size=(num_trajs_prior), replace = False)\n prior_data = prior_data[idxs_prior]\n\n num_trajs_task = int(len(task_data) * sample_ratio)\n idxs_task = np.random.choice(len(task_data), size=(num_trajs_task), replace = False)\n task_data = task_data[idxs_task]\n\n if set_type == 'full':\n full_data = np.concatenate([prior_data, task_data], axis=0) # list of dict\n elif set_type == 'prior':\n full_data = prior_data\n elif set_type =='task':\n full_data = task_data\n keys = ['observations', 'actions', 'rewards', 'next_observations', 'terminals', 'weights']\n\n init_obss = []\n for d in prior_data:\n obs_list = d['observations']\n info_list = d['env_infos']\n init_obss.append(get_doubledrawer_obs(obs_list[0], info_list[0]))\n \n dict_data = {}\n for key in keys:\n values = []\n for d in full_data: # trajectory, dict of lists\n value_list = d[key] # list of timesteps data\n if key == 'observations':\n info_list = d['env_infos']\n # initial info is similar to step 1\n values += [get_doubledrawer_obs(obs,info) for obs,info in zip(value_list, [info_list[0]] + info_list[:-1])]\n elif key == 'next_observations':\n info_list = d['env_infos']\n values += [get_doubledrawer_obs(obs,info) for obs,info in zip(value_list, info_list)]\n else:\n values += value_list # element is list\n values = np.asarray(values)\n dict_data[key] = values\n rtgs = np.zeros_like(dict_data['rewards']) # no return\n dict_data['rtgs'] = rtgs\n\n init_obss = np.asarray(init_obss)\n return dict_data, init_obss" } ]
import argparse
import os
import random
import pickle
import datetime
import roboverse
import numpy as np
import torch
from offlinerlkit.nets import MLP
from offlinerlkit.modules import ActorProb, Critic, TanhDiagGaussian
from offlinerlkit.buffer import ReplayBuffer
from offlinerlkit.utils.logger import Logger, make_log_dirs
from offlinerlkit.policy_trainer import MFPolicyTrainer
from offlinerlkit.policy import CQLPolicy
from offlinerlkit.utils.roboverse_utils import PickPlaceObsWrapper, DoubleDrawerObsWrapper, get_pickplace_dataset, get_doubledrawer_dataset
10,931
parser.add_argument("--algo-name", type=str, default="mbcql") parser.add_argument("--task", type=str, default="pickplace") parser.add_argument('--last_eval', action='store_false', help="Show eval result for every epoch if False") # env config (pickplace) parser.add_argument('--horizon', type=int, default=40, help="max path length for pickplace") parser.add_argument('--rollout_ckpt_path', type=str, required=True, help="dir path, used to load mbrcsl rollout trajectories" ) parser.add_argument("--seed", type=int, default=0) parser.add_argument("--hidden-dims", type=int, nargs='*', default=[256, 256, 256]) parser.add_argument("--actor-lr", type=float, default=1e-4) parser.add_argument("--critic-lr", type=float, default=3e-4) parser.add_argument("--gamma", type=float, default=0.99) parser.add_argument("--tau", type=float, default=0.005) parser.add_argument("--alpha", type=float, default=0.2) parser.add_argument("--target-entropy", type=int, default=None) parser.add_argument("--auto-alpha", default=True) parser.add_argument("--alpha-lr", type=float, default=1e-4) parser.add_argument("--cql-weight", type=float, default=5.0) parser.add_argument("--temperature", type=float, default=1.0) parser.add_argument("--max-q-backup", type=bool, default=False) parser.add_argument("--deterministic-backup", type=bool, default=True) parser.add_argument("--with-lagrange", type=bool, default=False) parser.add_argument("--lagrange-threshold", type=float, default=10.0) parser.add_argument("--cql-alpha-lr", type=float, default=3e-4) parser.add_argument("--num-repeat-actions", type=int, default=10) parser.add_argument("--epoch", type=int, default=200) parser.add_argument("--step-per-epoch", type=int, default=1000) parser.add_argument("--eval_episodes", type=int, default=100) parser.add_argument("--batch-size", type=int, default=256) parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu") return parser.parse_args() def train(args=get_args()): # seed random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) torch.backends.cudnn.deterministic = True # create env and dataset if args.task == 'pickplace': env = roboverse.make('Widow250PickTray-v0') env = PickPlaceObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = np.prod(args.action_shape) data_path = os.path.join(args.rollout_ckpt_path, "rollout.dat") ckpt_dict = pickle.load(open(data_path,"rb")) # checkpoint in dict type rollout_data_all = ckpt_dict['data'] # should be dict num_traj_all = ckpt_dict['num_traj'] print(f"Loaded {num_traj_all} rollout trajectories") elif args.task == 'doubledraweropen': env = roboverse.make('Widow250DoubleDrawerOpenGraspNeutral-v0') env = DoubleDrawerObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = np.prod(args.action_shape) data_path = os.path.join(args.rollout_ckpt_path, "rollout.dat") ckpt_dict = pickle.load(open(data_path,"rb")) # checkpoint in dict type rollout_data_all = ckpt_dict['data'] # should be dict num_traj_all = ckpt_dict['num_traj'] print(f"Loaded {num_traj_all} rollout trajectories") elif args.task == 'doubledrawercloseopen': env = roboverse.make('Widow250DoubleDrawerCloseOpenGraspNeutral-v0') env = DoubleDrawerObsWrapper(env) obs_space = env.observation_space 
args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = np.prod(args.action_shape) data_path = os.path.join(args.rollout_ckpt_path, "rollout.dat") ckpt_dict = pickle.load(open(data_path,"rb")) # checkpoint in dict type rollout_data_all = ckpt_dict['data'] # should be dict num_traj_all = ckpt_dict['num_traj'] print(f"Loaded {num_traj_all} rollout trajectories") elif args.task == 'doubledrawerpickplaceopen': env = roboverse.make('Widow250DoubleDrawerPickPlaceOpenGraspNeutral-v0') env = DoubleDrawerObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = np.prod(args.action_shape) data_path = os.path.join(args.rollout_ckpt_path, "rollout.dat") ckpt_dict = pickle.load(open(data_path,"rb")) # checkpoint in dict type rollout_data_all = ckpt_dict['data'] # should be dict num_traj_all = ckpt_dict['num_traj'] print(f"Loaded {num_traj_all} rollout trajectories") else: raise NotImplementedError env.reset(seed=args.seed) # create policy model actor_backbone = MLP(input_dim=np.prod(args.obs_shape), hidden_dims=args.hidden_dims) critic1_backbone = MLP(input_dim=np.prod(args.obs_shape) + args.action_dim, hidden_dims=args.hidden_dims) critic2_backbone = MLP(input_dim=np.prod(args.obs_shape) + args.action_dim, hidden_dims=args.hidden_dims) dist = TanhDiagGaussian( latent_dim=getattr(actor_backbone, "output_dim"), output_dim=args.action_dim, unbounded=True, conditioned_sigma=True ) actor = ActorProb(actor_backbone, dist, args.device)
# MBCQL. Need rollout data from MBRCSL def get_args(): parser = argparse.ArgumentParser() parser.add_argument("--algo-name", type=str, default="mbcql") parser.add_argument("--task", type=str, default="pickplace") parser.add_argument('--last_eval', action='store_false', help="Show eval result for every epoch if False") # env config (pickplace) parser.add_argument('--horizon', type=int, default=40, help="max path length for pickplace") parser.add_argument('--rollout_ckpt_path', type=str, required=True, help="dir path, used to load mbrcsl rollout trajectories" ) parser.add_argument("--seed", type=int, default=0) parser.add_argument("--hidden-dims", type=int, nargs='*', default=[256, 256, 256]) parser.add_argument("--actor-lr", type=float, default=1e-4) parser.add_argument("--critic-lr", type=float, default=3e-4) parser.add_argument("--gamma", type=float, default=0.99) parser.add_argument("--tau", type=float, default=0.005) parser.add_argument("--alpha", type=float, default=0.2) parser.add_argument("--target-entropy", type=int, default=None) parser.add_argument("--auto-alpha", default=True) parser.add_argument("--alpha-lr", type=float, default=1e-4) parser.add_argument("--cql-weight", type=float, default=5.0) parser.add_argument("--temperature", type=float, default=1.0) parser.add_argument("--max-q-backup", type=bool, default=False) parser.add_argument("--deterministic-backup", type=bool, default=True) parser.add_argument("--with-lagrange", type=bool, default=False) parser.add_argument("--lagrange-threshold", type=float, default=10.0) parser.add_argument("--cql-alpha-lr", type=float, default=3e-4) parser.add_argument("--num-repeat-actions", type=int, default=10) parser.add_argument("--epoch", type=int, default=200) parser.add_argument("--step-per-epoch", type=int, default=1000) parser.add_argument("--eval_episodes", type=int, default=100) parser.add_argument("--batch-size", type=int, default=256) parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu") return parser.parse_args() def train(args=get_args()): # seed random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) torch.backends.cudnn.deterministic = True # create env and dataset if args.task == 'pickplace': env = roboverse.make('Widow250PickTray-v0') env = PickPlaceObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = np.prod(args.action_shape) data_path = os.path.join(args.rollout_ckpt_path, "rollout.dat") ckpt_dict = pickle.load(open(data_path,"rb")) # checkpoint in dict type rollout_data_all = ckpt_dict['data'] # should be dict num_traj_all = ckpt_dict['num_traj'] print(f"Loaded {num_traj_all} rollout trajectories") elif args.task == 'doubledraweropen': env = roboverse.make('Widow250DoubleDrawerOpenGraspNeutral-v0') env = DoubleDrawerObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = np.prod(args.action_shape) data_path = os.path.join(args.rollout_ckpt_path, "rollout.dat") ckpt_dict = pickle.load(open(data_path,"rb")) # checkpoint in dict type rollout_data_all = ckpt_dict['data'] # should be dict num_traj_all = ckpt_dict['num_traj'] print(f"Loaded {num_traj_all} rollout trajectories") elif args.task == 'doubledrawercloseopen': env = 
roboverse.make('Widow250DoubleDrawerCloseOpenGraspNeutral-v0') env = DoubleDrawerObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = np.prod(args.action_shape) data_path = os.path.join(args.rollout_ckpt_path, "rollout.dat") ckpt_dict = pickle.load(open(data_path,"rb")) # checkpoint in dict type rollout_data_all = ckpt_dict['data'] # should be dict num_traj_all = ckpt_dict['num_traj'] print(f"Loaded {num_traj_all} rollout trajectories") elif args.task == 'doubledrawerpickplaceopen': env = roboverse.make('Widow250DoubleDrawerPickPlaceOpenGraspNeutral-v0') env = DoubleDrawerObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = np.prod(args.action_shape) data_path = os.path.join(args.rollout_ckpt_path, "rollout.dat") ckpt_dict = pickle.load(open(data_path,"rb")) # checkpoint in dict type rollout_data_all = ckpt_dict['data'] # should be dict num_traj_all = ckpt_dict['num_traj'] print(f"Loaded {num_traj_all} rollout trajectories") else: raise NotImplementedError env.reset(seed=args.seed) # create policy model actor_backbone = MLP(input_dim=np.prod(args.obs_shape), hidden_dims=args.hidden_dims) critic1_backbone = MLP(input_dim=np.prod(args.obs_shape) + args.action_dim, hidden_dims=args.hidden_dims) critic2_backbone = MLP(input_dim=np.prod(args.obs_shape) + args.action_dim, hidden_dims=args.hidden_dims) dist = TanhDiagGaussian( latent_dim=getattr(actor_backbone, "output_dim"), output_dim=args.action_dim, unbounded=True, conditioned_sigma=True ) actor = ActorProb(actor_backbone, dist, args.device)
critic1 = Critic(critic1_backbone, args.device)
2
2023-10-11 08:36:06+00:00
16k
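Note on this row: the CQL policy snippet quoted in the context above forms its conservative penalty as logsumexp(cat_q / temperature) * cql_weight * temperature - q.mean() * cql_weight, added to each critic loss. Below is a minimal standalone sketch of just that term; the tensor shapes and example values are illustrative assumptions (the cql_weight and temperature defaults simply mirror the row's --cql-weight and --temperature argparse defaults), not part of the dataset row itself.

import torch

def conservative_loss(cat_q: torch.Tensor, q: torch.Tensor,
                      cql_weight: float = 5.0, temperature: float = 1.0) -> torch.Tensor:
    # cat_q: (batch_size, 3 * num_repeat, 1) Q-values for sampled policy actions,
    # next-state policy actions and random actions, concatenated along dim 1.
    # q: (batch_size, 1) Q-values of the dataset actions.
    push_down = torch.logsumexp(cat_q / temperature, dim=1).mean() * cql_weight * temperature
    push_up = q.mean() * cql_weight
    return push_down - push_up

# illustrative usage with random tensors (batch_size=256, num_repeat=10)
loss = conservative_loss(torch.randn(256, 30, 1), torch.randn(256, 1))

The logsumexp term pushes Q-values down on out-of-distribution actions while the q.mean() term pushes them up on dataset actions, which is what the quoted snippet adds to critic1_loss and critic2_loss before the critic updates.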
wilhelmagren/finq
finq/portfolio.py
[ { "identifier": "Asset", "path": "finq/asset.py", "snippet": "class Asset(object):\n \"\"\" \"\"\"\n\n def __init__(\n self,\n data: pd.Series,\n name: str,\n *,\n market: Optional[str] = None,\n index_name: Optional[str] = None,\n price_type: str = \"Close\",\n pre_compute: bool = True,\n ):\n \"\"\" \"\"\"\n\n self._data = data\n self._name = name\n self._market = market\n self._index_name = index_name\n self._price_type = price_type\n self._pre_compute = pre_compute\n self._metrics = {}\n\n if pre_compute:\n log.info(\"pre-computing some common metrics...\")\n self.compute_common_metrics()\n log.info(\"OK!\")\n\n def __eq__(self, other: Any) -> bool:\n \"\"\"\n Compare self with the other object. If ``other`` is of instance class\n ``Asset`` then compare their hashes. Otherwise ``False``.\n\n Parameters\n ----------\n other : Any\n The other object to compare equality against.\n\n Returns\n -------\n bool\n Whether or not they objects are equal.\n\n \"\"\"\n if isinstance(other, self.__class__):\n return hash(self) == hash(other)\n return False\n\n def __hash__(self) -> int:\n \"\"\"\n Compute a hash from the following attributes of the ``Asset`` object:\n (`_name`, `_market_`, `_index_name`, `_price_type`).\n\n NOTE: the ``Asset`` object is mutable, thus, the hash functionality\n can have unknown side effects... Use responsibly.\n\n Returns\n -------\n int\n The computed hash value.\n\n \"\"\"\n return hash(\n (\n len(self._data),\n self._data.mean(),\n self._data.std(),\n self._name,\n self._market,\n self._index_name,\n self._price_type,\n )\n )\n\n def __str__(self) -> str:\n \"\"\" \"\"\"\n\n format = f\"<{self.__class__.__name__} called `{self._name}`\"\n if self._market:\n format += f\" on {self._market}\"\n if self._index_name:\n format += f\" in {self._index_name}\"\n\n format += f\" (price type: {self._price_type})\"\n format += f\"\\n-- num samples:\\t\\t\\t{self._data.shape[0]}\"\n\n drm = self._metrics.get(\"daily_returns_mean\", None)\n if drm:\n format += f\"\\n-- daily returns mean:\\t\\t{drm:.5f}\"\n\n yrm = self._metrics.get(\"yearly_returns_mean\", None)\n if yrm:\n format += f\"\\n-- yearly returns mean:\\t\\t{yrm:.5f}\"\n\n yv = self._metrics.get(\"yearly_volatility\", None)\n if yv:\n format += f\"\\n-- yearly volatility:\\t\\t{yv:.5f}\"\n\n skew = self._metrics.get(\"skewness\", None)\n if skew:\n format += f\"\\n-- unbiased skewness:\\t\\t{self._metrics['skewness']:.5f}\"\n\n format += f\"\\nobject located at {hex(id(self))}>\"\n\n return format\n\n def compute_common_metrics(self):\n \"\"\" \"\"\"\n self._metrics[\"daily_returns\"] = self.period_returns(period=1)\n self._metrics[\"daily_returns_mean\"] = self.period_returns_mean(period=1)\n self._metrics[\"yearly_returns_mean\"] = self.period_returns_mean(period=252)\n self._metrics[\"yearly_volatility\"] = self.volatility(period=1, trading_days=252)\n self._metrics[\"skewness\"] = self.skewness()\n\n def period_returns(self, period: int = 1) -> pd.Series:\n \"\"\" \"\"\"\n return self._data.pct_change(periods=period)\n\n def period_returns_mean(self, period: int = 1) -> np.typing.DTypeLike:\n \"\"\" \"\"\"\n return self.period_returns(period=period).mean(axis=0)\n\n def volatility(\n self, period: int = 1, trading_days: int = 252\n ) -> np.typing.DTypeLike:\n \"\"\" \"\"\"\n return self.period_returns(period=period).std() * np.sqrt(trading_days)\n\n def skewness(self) -> np.float32:\n \"\"\"\n Computes the skewness of the saved data. 
Uses the ``Adjusted Fisher-Pearson\n standardized moment coefficient`` formula without bias [1, 2]. Skewness is a\n measure of the asymmetry of the probability distribution for a real-valued\n random variable around its mean.\n\n Returns\n -------\n np.float32\n The skewness measure for the saved historical price data.\n\n References\n ----------\n [1] Skewness calculation on scipy.\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.skew.html\n [2] Moment calculation on scipy.\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.moment.html\n\n \"\"\"\n return self._data.skew().astype(np.float32)\n\n @property\n def data(self) -> pd.Series:\n \"\"\"\n Return the saved data by accessing it as a property of the ``Asset`` object.\n\n Returns\n -------\n pd.Series\n A ``pd.Series`` copy of the saved data.\n\n \"\"\"\n return self._data\n\n @data.setter\n def data(self, data: pd.Series):\n \"\"\"\n Set the value of the data attribute for the ``Asset`` object.\n\n Parameters\n ----------\n data : pd.Series\n The new ``pd.Series`` to set as data attribute for the object.\n\n \"\"\"\n self._data = data\n\n @property\n def name(self) -> str:\n \"\"\"\n Get the name property of the ``Asset`` object.\n\n Returns\n -------\n str\n The name of the ``Asset``.\n\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, name: str):\n \"\"\"\n Set the value of the name property for the ``Asset`` object.\n\n Parameters\n ----------\n name : str\n The new ``str`` to set as name attribute for the object.\n\n \"\"\"\n self._name = name\n\n def as_numpy(self, dtype: np.typing.DTypeLike = np.float32) -> np.ndarray:\n \"\"\"\n Return the saved data as an numpy array. It will have the shape (n_samples, ).\n\n Parameters\n ----------\n dtype : np.typing.DTypeLike\n The data type to create the new ``np.ndarray`` as.\n Defaults to ``np.float32``.\n\n Returns\n -------\n np.ndarray\n A new ``np.ndarray`` from the ``pd.Series`` data.\n\n \"\"\"\n return self._data.to_numpy().astype(dtype)" }, { "identifier": "Dataset", "path": "finq/datasets/dataset.py", "snippet": "class Dataset(object):\n \"\"\"\n A collection of ticker symbols and their historical price data. Fetches information\n and prices from Yahoo! Finance and optionally saves them to a local path for later\n use. Supports fixing missing values by interpolating ``NaN`` and verifying the\n integrity of the fetched data.\n\n Parameters\n ----------\n names : list | None\n The names of the financial assets to create a dataset with.\n symbols : list | None\n The ticker symbols corresponding to the names of the financial assets.\n market : str\n The name of the market to fetch the historical price data from.\n Defaults to ``OMX``.\n index_name : str | None\n The name of the financial index to get ticker symbols and names from.\n proxy : str | None\n The name of the proxy url to use for REST requests.\n cache_name: Path | str\n The name of the path to the file which stores the cache.\n Defaults to ``/home/.finq/http_cache``.\n n_requests : int\n The max number of requests to perform per ``t_interval``. 
Defaults to ``5``.\n t_interval : int\n The time interval (in seconds) to use with the ``CachedRateLimiter``.\n Defaults to ``1``.\n save : bool\n Wether or not to save the fetched data to a local file path.\n save_path : Path | str\n The local file path to potentially save any fetched data to.\n Defaults to ``.data/dataset/``.\n dataset_name : str\n The name of the ``Dataset`` class instance.\n separator : str\n The csv separator to use when loading and saving any ``pd.DataFrame``.\n Defaults to ``;``.\n\n \"\"\"\n\n def __init__(\n self,\n names: Optional[List[str]] = None,\n symbols: Optional[List[str]] = None,\n *,\n market: str = \"OMX\",\n index_name: Optional[str] = None,\n proxy: Optional[str] = None,\n cache_name: Union[Path, str] = default_finq_cache_path(),\n n_requests: int = 5,\n t_interval: int = 1,\n save: bool = False,\n save_path: Union[Path, str] = default_finq_save_path(),\n dataset_name: str = \"dataset\",\n separator: str = \";\",\n filter_symbols: Callable = lambda s: s,\n ) -> Optional[InvalidCombinationOfArgumentsError]:\n \"\"\" \"\"\"\n\n log.info(\n \"creating cached rate-limited session with \"\n f\"{n_requests} requests per {t_interval} seconds\"\n )\n\n # We combine a cache with rate-limiting to avoid triggering\n # Yahoo! Finance's rate-limiter that can otherwise corrupt data.\n # We specify a maximum number of requests N per X seconds.\n session = CachedRateLimiter(\n cache_name=cache_name,\n limiter=Limiter(\n RequestRate(\n n_requests,\n Duration.SECOND * t_interval,\n ),\n ),\n )\n\n if proxy:\n session.proxies.update(\n {\n \"https\": proxy,\n }\n )\n\n self._proxy = proxy\n self._session = session\n self._n_requests = n_requests\n self._t_interval = t_interval\n\n if (not names or not symbols) and isinstance(index_name, str):\n if market == \"OMX\":\n\n def filter_symbols(s):\n return s.replace(\" \", \"-\") + \".ST\"\n\n names, symbols = fetch_names_and_symbols(\n index_name,\n market=market,\n session=session,\n filter_symbols=filter_symbols,\n )\n\n if not names or not symbols:\n raise InvalidCombinationOfArgumentsError(\n \"You did not pass in a list of names and symbols, and if you \"\n \"passed in an index name to fetch, the request failed since \"\n f\"`{names=}` and `{symbols=}`. 
Did you pass in a valid index name?\"\n )\n\n if not (len(names) == len(symbols)):\n raise InvalidCombinationOfArgumentsError(\n \"Number of names does not match the number of ticker symbols, \"\n f\"{len(names)} != {len(symbols)}.\\n{names=}\\n{symbols=}\"\n )\n\n self._data = None\n self._info = None\n\n self._names = names\n self._symbols = symbols\n self._market = market\n self._index_name = index_name\n\n self._save = save\n self._save_path = Path(save_path) / dataset_name\n self._dataset_name = dataset_name\n self._separator = separator\n\n def __getitem__(self, key: str) -> Optional[pd.DataFrame]:\n \"\"\"\n Get the ``pd.DataFrame`` from the locally stored dictionary which maps ticker\n symbols to their corresponding historical price data.\n\n Parameters\n ----------\n key : str\n The dictionary key to get data for.\n\n Returns\n -------\n pd.DataFrame\n The data that is associated with the provided ticker key.\n\n \"\"\"\n return self._data.get(key, None)\n\n def __len__(self) -> int:\n \"\"\"\n Get the number of names in the dataset.\n\n Returns\n -------\n int\n The number of names.\n\n \"\"\"\n return len(self._symbols)\n\n @staticmethod\n def _save_data(data: pd.DataFrame, path: Union[Path, str], separator: str):\n \"\"\"\n Save the historical price data for a ticker to a local csv file.\n\n Parameters\n ----------\n data : pd.DataFrame\n The ``pd.DataFrame`` to save as a csv file.\n path : Path | str\n The local file name to save the csv to.\n separator : str\n The csv separator to use when saving the data. Defaults to ``;``.\n\n \"\"\"\n data.to_csv(\n path,\n sep=separator,\n header=True,\n )\n\n @staticmethod\n def _save_info(info: dict, path: Union[Path, str]):\n \"\"\"\n Save the ticker information dictionary to a local file as a ``json`` object.\n\n Parameters\n ----------\n info : dict\n The ticker information dictionary to save as a ``json`` file.\n path : Path | str\n The local file name to save the dictionary to.\n\n \"\"\"\n with open(path, \"w\") as f:\n json.dump(info, f)\n\n @staticmethod\n def _load_data(path: Union[Path, str], separator: str) -> pd.DataFrame:\n \"\"\"\n Create a new ``pd.DataFrame`` from data that is stored locally as a ``csv``.\n\n Parameters\n ----------\n path : Path | str\n The local file path to read the csv from.\n separator : str\n The separator to use for parsing the csv.\n\n Returns\n -------\n pd.DataFrame\n The data that was stored in the csv.\n\n \"\"\"\n return pd.read_csv(path, sep=separator, index_col=\"Date\")\n\n @staticmethod\n def _load_info(path: Union[Path, str]) -> dict:\n \"\"\"\n Parameters\n ----------\n path : Path | str\n The local file path to read the json object from.\n\n Returns\n -------\n dict\n A dictionary containing the information for the ticker.\n\n \"\"\"\n with open(path, \"r\") as f:\n return json.load(f)\n\n @staticmethod\n def _extract_dates_from_data(data: pd.DataFrame) -> Tuple[List, Dict]:\n \"\"\"\n Extract the ``Date`` column from a ``pd.DataFrame`` and produce a sorted list of\n unique dates for the ticker.\n\n Parameters\n ----------\n data : pd.DataFrame\n The data to extract ``Date`` column from.\n\n Returns\n -------\n tuple\n A list of the unique dates (sorted in ascending order) and a dictionary\n containing all ticker dates as key: ``str`` and value: ``list``.\n\n \"\"\"\n dates = {}\n all_dates = []\n\n for ticker, df in data.items():\n dates[ticker] = df.index.to_list()\n all_dates.extend(dates[ticker])\n\n unique_dates = sorted(list(set(all_dates)), reverse=False)\n\n return 
(unique_dates, dates)\n\n def _save_tickers_data(self):\n \"\"\" \"\"\"\n\n log.info(f\"saving fetched tickers data to {self._save_path}...\")\n\n for ticker in self._symbols:\n self._save_data(\n self._data[ticker],\n self._save_path / \"data\" / f\"{ticker}.csv\",\n separator=self._separator,\n )\n\n log.info(\"OK!\")\n\n def _save_tickers_info(self):\n \"\"\" \"\"\"\n\n log.info(f\"saving fetched tickers info to {self._save_path}...\")\n\n for ticker in self._symbols:\n self._save_info(\n self._info[ticker],\n self._save_path / \"info\" / f\"{ticker}.json\",\n )\n\n log.info(\"OK!\")\n\n def _save_data_and_info(self):\n \"\"\"\n Saves the info and data objects to a local file path.\n\n \"\"\"\n\n self._save_tickers_data()\n self._save_tickers_info()\n\n def _fetch_tickers_data(\n self,\n period: str,\n cols: List[str],\n ):\n \"\"\" \"\"\"\n\n data = {}\n\n for ticker in (bar := tqdm(self._symbols)):\n bar.set_description(f\"Fetching ticker {ticker} data from Yahoo! Finance\")\n\n yf_ticker = yf.Ticker(ticker, session=self._session)\n data[ticker] = yf_ticker.history(\n period=period,\n proxy=self._proxy,\n )[\n cols\n ].tz_localize(None)\n\n all_dates, dates = self._extract_dates_from_data(data)\n\n self._data = data\n self._dates = dates\n self._all_dates = all_dates\n\n def _fetch_tickers_info(self):\n \"\"\" \"\"\"\n\n info = {}\n\n for ticker in (bar := tqdm(self._symbols)):\n bar.set_description(f\"Fetching ticker {ticker} info from Yahoo! Finance\")\n\n yf_ticker = yf.Ticker(ticker, session=self._session)\n info[ticker] = yf_ticker.get_info(proxy=self._proxy)\n\n self._info = info\n\n def _fetch_tickers_data_and_info(\n self,\n period: str,\n cols: List[str],\n ):\n \"\"\"\n Use the `yfinance` library to fetch historical ticker data for the specified time\n period. The performance of the REST requests is highly dependent on three things:\n the config of your `CachedRateLimiter`, the amount of tickers you want to fetch,\n and the multi-threading support of your CPU.\n\n Parameters\n ----------\n period : str\n The time period to try and fetch data from.\n cols : list\n The columns of the fetched ticker data to collect.\n\n \"\"\"\n\n self._fetch_tickers_data(period, cols)\n self._fetch_tickers_info()\n\n def load_local_data_files(self) -> Optional[DirectoryNotFoundError]:\n \"\"\" \"\"\"\n\n path = Path(self._save_path)\n data_path = path / \"data\"\n\n if not path.is_dir():\n raise DirectoryNotFoundError(\n f\"The local save path {path} does not exist. Perhaps you haven't yet \"\n \"tried fetching any data? To do that run `dataset.fetch_data(..)`.\"\n )\n\n if not data_path.is_dir():\n raise DirectoryNotFoundError(\n f\"The local save path {data_path} does not exist. Perhaps you haven't \"\n \"yet tried fetching any data? 
To do that run `dataset.fetch_data(..)`.\"\n )\n\n data = {}\n\n for ticker in (bar := tqdm(self._symbols)):\n bar.set_description(f\"Loading ticker {ticker} data from local path {path}\")\n\n data[ticker] = self._load_data(\n data_path / f\"{ticker}.csv\",\n separator=self._separator,\n )\n\n if not isinstance(data[ticker].index, pd.DatetimeIndex):\n data[ticker].index = pd.to_datetime(data[ticker].index)\n\n all_dates, dates = self._extract_dates_from_data(data)\n\n self._data = data\n self._dates = dates\n self._all_dates = all_dates\n\n def load_local_info_files(self) -> Optional[DirectoryNotFoundError]:\n \"\"\" \"\"\"\n path = Path(self._save_path)\n info_path = path / \"info\"\n\n if not path.is_dir():\n raise DirectoryNotFoundError(\n f\"The local save path {path} does not exist. Perhaps you haven't yet \"\n \"tried fetching any data? To do that run `dataset.fetch_data(..)`.\"\n )\n\n if not info_path.is_dir():\n raise DirectoryNotFoundError(\n f\"The local save path {info_path} does not exist. Perhaps you haven't \"\n \"yet tried fetching any data? To do that run `dataset.fetch_data(..)`.\"\n )\n\n info = {}\n\n for ticker in (bar := tqdm(self._symbols)):\n bar.set_description(f\"Loading ticker {ticker} data from local path {path}\")\n\n info[ticker] = self._load_info(\n info_path / f\"{ticker}.json\",\n )\n\n self._info = info\n\n def load_local_files(self):\n \"\"\"\n Load the locally saved info and data files. The info is read from file as a\n ``json`` and the data is read from ``csv`` as a ``pd.DataFrame``.\n\n Raises\n ------\n DirectoryNotFoundError\n When either of the paths to the saved ``info`` and ``data`` is not a directory.\n\n \"\"\"\n\n self.load_local_data_files()\n self.load_local_info_files()\n\n def fetch_data(\n self,\n period: str,\n *,\n cols: List[str] = [\"Open\", \"High\", \"Low\", \"Close\"],\n ) -> Dataset:\n \"\"\"\n Fetch the historical ticker data for the specified time period. If there exists\n locally saved files for all tickers, will try and load them instead of fetching\n from Yahoo! Finance. Saves the fetched files if ``save=True`` was specified in\n the class constructor.\n\n Parameters\n ----------\n period : str\n The time period to try and fetch data from. Valid values are (``1d``,\n ``5d``, ``1mo``, ``3mo``, ``6mo``, ``1y``, ``2y``, ``5y``, ``10y``,\n ``ytd``, ``max``).\n cols : list\n The columns of the fetched ticker data to collect. 
Defaults to\n (``Date``, ``Open``, ``High``, ``Low``, ``Close``).\n\n Returns\n -------\n Dataset\n The initialized instance of ``self`` with ticker data loaded or fetched.\n\n \"\"\"\n\n if all_tickers_data_saved(self._save_path, self._symbols):\n log.info(\n f\"found existing local data files for {self.__class__.__name__}, \"\n \"attempting local load of data files...\"\n )\n\n try:\n self.load_local_data_files()\n log.info(\"OK!\")\n return self\n\n except DirectoryNotFoundError:\n log.warning(\"failed to load local data files, attempting new fetch...\")\n\n self._fetch_tickers_data(period, cols)\n\n if self._save:\n setup_finq_save_data_path(self._save_path)\n self._save_tickers_data()\n\n return self\n\n def fetch_info(\n self,\n ) -> Dataset:\n \"\"\" \"\"\"\n\n if all_tickers_info_saved(self._save_path, self._symbols):\n log.info(\n f\"found existing local info files for {self.__class__.__name__}, \"\n \"attempting local load of info files...\"\n )\n\n try:\n self.load_local_info_files()\n log.info(\"OK!\")\n return self\n\n except DirectoryNotFoundError:\n log.warning(\"failed to load local info files, attempting new fetch...\")\n\n self._fetch_tickers_info()\n\n if self._save:\n setup_finq_save_info_path(self._save_path)\n\n return self\n\n def fetch_data_and_info(\n self,\n period: str,\n *,\n cols: List[str] = [\"Open\", \"High\", \"Low\", \"Close\"],\n ) -> Dataset:\n \"\"\" \"\"\"\n self = self.fetch_data(period, cols=cols)\n self = self.fetch_info()\n return self\n\n def fix_missing_data(\n self,\n *,\n cols: List[str] = [\"Open\", \"High\", \"Low\", \"Close\"],\n resave: bool = True,\n ) -> Dataset:\n \"\"\"\n Compares each tickers dates in their corresponding ``pd.DataFrame`` and compares\n to the known set of dates collected. If there are any missing values, will add\n the missing dates to the dataframe and then use ``df.interpolate()`` to fix them.\n Default interpolation strategy is ``linear``.\n\n Parameters\n ----------\n cols : list\n The columns of the ``pd.DataFrame`` to consider when looking for missing data\n to interpolate. 
Defaults to (``Open``, ``High``, ``Low``, ``Close``).\n resave : bool\n Whether or not to resave the data to local path after fixing missing values.\n Defaults to ``True`` but will onlyesave if there existed missing data.\n\n Returns\n -------\n Dataset\n The initialized instance of ``self``.\n\n \"\"\"\n\n log.info(\"attempting to fix any missing data...\")\n\n n_missing_data = 0\n for ticker in (bar := tqdm(self._symbols)):\n bar.set_description(f\"Fixing ticker {ticker} potential missing values\")\n\n df = self._data[ticker]\n diff = set(self._all_dates) - set(self._dates[ticker])\n\n if diff:\n n_missing_data += 1\n\n df_missed = pd.DataFrame(index=list(diff))\n df_missed.index.name = \"Date\"\n\n df_fixed = pd.concat((df, df_missed)).sort_index(inplace=False)\n df_fixed[cols] = df_fixed[cols].interpolate()\n\n if df_fixed[df_fixed.isnull().any(axis=1)].index.values.size:\n log.error(\n f\"failed to interpolate missing prices for ticker {ticker}!\"\n )\n\n self._data[ticker] = df_fixed\n self._dates[ticker] = self._all_dates\n\n if n_missing_data and resave:\n log.info(f\"fixed {n_missing_data} tickers with missing data\")\n if self._save:\n log.info(f\"saving fixed data to {self._save_path}...\")\n self._save_tickers_data()\n\n log.info(\"OK!\")\n return self\n\n def verify_data(self) -> Union[ValueError, Dataset]:\n \"\"\"\n Tries to verify that the stored data does not contain any missing values.\n This is performed by comparing the dates in each ticker ``pd.DataFrame``\n with the known set of all fetched dates.\n\n Returns\n -------\n Dataset\n The initialized instance of ``self``.\n\n Raises\n ------\n ValueError\n If there exists missing values in any stored ``pd.DataFrame``.\n\n \"\"\"\n\n log.info(\"verifying that stored data has no missing values...\")\n for ticker in (bar := tqdm(self._symbols)):\n bar.set_description(f\"Verifying ticker {ticker} data\")\n\n diff = set(self._all_dates) - set(self._dates[ticker])\n if diff:\n raise ValueError(\n f\"There is a difference in dates for symbol {ticker}, have you \"\n \"tried fixing missing values prior to verifying? To do that, run \"\n \"dataset.fix_missing_data() with your initialized Dataset class.\"\n )\n\n log.info(\"OK!\")\n return self\n\n def run(self, period: str = \"1y\") -> Dataset:\n \"\"\"\n Call the three core methods for the ``Dataset`` class which fetches data,\n tries to fix missing values, and lastly verifies that there is no missing data.\n\n Parameters\n ----------\n period : str\n The time period to try and fetch data from. Valid values are (``1d``,\n ``5d``, ``1mo``, ``3mo``, ``6mo``, ``1y``, ``2y``, ``5y``, ``10y``,\n ``ytd``, ``max``). 
Defaults to ``1y``.\n\n Returns\n -------\n Dataset\n The intialized instance of ``self``.\n\n \"\"\"\n return self.fetch_data(period).fix_missing_data().verify_data()\n\n def visualize_ticker(\n self,\n ticker: str,\n **kwargs: Dict[str, Any],\n ):\n \"\"\" \"\"\"\n\n if kwargs.get(\"title\", None) is None:\n kwargs[\"title\"] = f\"{ticker} historical OHLC prices [{self._market}]\"\n\n mpf.plot(\n self._data[ticker],\n **kwargs,\n )\n\n def visualize(\n self,\n *,\n title: str = \"Historical stock data\",\n xlabel: str = \"Dates\",\n ylabel: str = \"Closing price [$]\",\n ticks_rotation: int = 70,\n legend_loc: str = \"best\",\n log_scale: bool = False,\n save_path: Optional[str] = None,\n price_type: str = \"Close\",\n show: bool = True,\n block: bool = True,\n ):\n \"\"\"\n Plot the historical ticker price data over time.\n\n Parameters\n ----------\n title : str\n The header title to set on the generated plot.\n xlabel : str\n The label to use for the x-axis.\n ylabel : str\n The label to use for the y-axis.\n ticks_rotation : int\n The amount of degrees to rotate the x-axis ticks with. Defaults to ``70``.\n legend_loc : str\n The location of the legend. Some possible values are (``best``, ``center``,\n ``upper left``, ``upper right``, ``lower left``, ``lower right``).\n Defaults to ``best``.\n log_scale : bool\n ``True`` if the historical data should be log scaled, otherwise ``False``.\n save_path : str | None\n The local file to save the generated plot to. Does not save the plot if\n the argument is ``None``.\n price_type : str\n The price type of the historical data to plot. Has to be one\n of (``Open``, ``High``, ``Low``, ``Close``). Defaults to ``Close``.\n show : bool\n ``True`` if the generated plot should be shown on the screen, otherwise\n ``False``. Defaults to ``True``.\n block : bool\n Whether to wait for all figures to be closed before returning. When ``False``\n the figure windows will be displayed and returned immediately. Defaults to\n ``True``.\n\n \"\"\"\n\n for ticker, data in self._data.items():\n plt.plot(\n np.log(data[price_type]) if log_scale else data[price_type],\n label=ticker,\n )\n\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.xticks(rotation=ticks_rotation)\n plt.legend(loc=legend_loc)\n\n if save_path:\n log.info(f\"saving plot to path {save_path}\")\n plt.savefig(save_path)\n log.info(\"OK!\")\n\n if show:\n plt.show(block=block)\n plt.close()\n\n def get_tickers(self) -> List[str]:\n \"\"\"\n Return the saved list of ticker symbols.\n\n Returns\n -------\n list\n A list of ``str`` ticker symbols.\n\n \"\"\"\n return self._symbols\n\n def get_data(self) -> Dict[str, pd.DataFrame]:\n \"\"\"\n Return the saved dictionary which maps ticker symbols to their\n corresponding historical data with the following columns:\n (``Date``, ``Open``, ``High``, ``Low``, ``Close``).\n\n Returns\n -------\n dict\n A dictionary with key: ``str`` and value: ``pd.DataFrame``.\n\n \"\"\"\n return self._data\n\n def as_assets(self, price_type: str = \"Close\") -> Dict[str, Asset]:\n \"\"\"\n Create a list of Assets for each ticker and specified price type.\n\n Parameters\n ----------\n price_type : str\n The price type data to create an ``Asset`` object with. Has to be one\n of (``Open``, ``High``, ``Low``, ``Close``). 
Defaults to ``Close``.\n\n Returns\n -------\n dict\n A dictionary of newly created ``Asset`` objects with ticker symbols as keys.\n\n \"\"\"\n return {\n ticker: Asset(\n self._data[ticker][price_type],\n self._names[i],\n market=self._market,\n index_name=self._index_name,\n price_type=price_type,\n pre_compute=False,\n )\n for i, ticker in enumerate(self._symbols)\n }\n\n def as_df(self, price_type: str = \"Close\") -> pd.DataFrame:\n \"\"\"\n Create an aggregated ``pd.DataFrame`` for the specified price type.\n It will have the shape (n_samples, n_tickers).\n\n Parameters\n ----------\n price_type : str\n The price type data to create the ``pd.DataFrame`` object with. Has to\n be one of (``Open``, ``High``, ``Low``, ``Close``). Defaults to ``Close``.\n\n Returns\n -------\n pd.DataFrame\n A new ``pd.DataFrame`` with ticker names as columns.\n\n \"\"\"\n\n return pd.DataFrame(\n {t: d[price_type] for t, d in zip(self._symbols, self._data.values())},\n index=self._all_dates,\n )\n\n def as_numpy(\n self,\n price_type: str = \"Close\",\n *,\n dtype: np.typing.DTypeLike = np.float32,\n ) -> np.ndarray:\n \"\"\"\n Extract the specified price type from stored data as np.ndarray.\n It will have the shape (n_tickers, n_samples).\n\n Parameters\n ----------\n price_type : str\n The price type data to create the ``np.ndarray`` with. Has to be one\n of (``Open``, ``High``, ``Low``, ``Close``). Defaults to ``Close``.\n dtype : np.typing.DTypeLike\n The data type to create the new ``np.ndarray`` as.\n Defaults to ``np.float32``.\n\n Returns\n -------\n np.ndarray\n A new ``np.ndarray`` from the specified price type and dtype.\n\n \"\"\"\n return np.array(\n [d[price_type].to_numpy().astype(dtype) for d in self._data.values()]\n )" }, { "identifier": "FinqError", "path": "finq/exceptions.py", "snippet": "class FinqError(Exception):\n \"\"\" \"\"\"" }, { "identifier": "InvalidCombinationOfArgumentsError", "path": "finq/exceptions.py", "snippet": "class InvalidCombinationOfArgumentsError(FinqError):\n \"\"\" \"\"\"\n\n pass" }, { "identifier": "InvalidPortfolioWeightsError", "path": "finq/exceptions.py", "snippet": "class InvalidPortfolioWeightsError(FinqError):\n \"\"\" \"\"\"\n\n pass" }, { "identifier": "ObjectiveFunctionError", "path": "finq/exceptions.py", "snippet": "class ObjectiveFunctionError(FinqError):\n \"\"\" \"\"\"\n\n pass" }, { "identifier": "PortfolioNotYetOptimizedError", "path": "finq/exceptions.py", "snippet": "class PortfolioNotYetOptimizedError(FinqError):\n \"\"\" \"\"\"\n\n pass" }, { "identifier": "period_returns", "path": "finq/formulas.py", "snippet": "def period_returns(x: np.ndarray, period: int = 1) -> np.ndarray:\n \"\"\" \"\"\"\n\n return (x[:, period:] / x[:, :-period]) - 1" }, { "identifier": "sharpe_ratio", "path": "finq/formulas.py", "snippet": "def sharpe_ratio(\n r: Union[float, np.ndarray],\n v: Union[float, np.ndarray],\n rfr: float,\n) -> Union[float, np.ndarray]:\n \"\"\" \"\"\"\n\n return (r - rfr) / v" }, { "identifier": "weighted_returns", "path": "finq/formulas.py", "snippet": "def weighted_returns(w: np.ndarray, r: np.ndarray) -> np.ndarray:\n \"\"\" \"\"\"\n\n return np.dot(w, r)" }, { "identifier": "weighted_variance", "path": "finq/formulas.py", "snippet": "def weighted_variance(w: np.ndarray, cov: np.ndarray) -> np.ndarray:\n \"\"\" \"\"\"\n\n return np.dot(w, np.dot(cov, w.T))" } ]
import logging
import pandas as pd
import numpy as np
import scipy.optimize as scipyopt
import matplotlib.pyplot as plt
from functools import wraps
from tqdm import tqdm
from finq.asset import Asset
from finq.datasets import Dataset
from finq.exceptions import (
    FinqError,
    InvalidCombinationOfArgumentsError,
    InvalidPortfolioWeightsError,
    ObjectiveFunctionError,
    PortfolioNotYetOptimizedError,
)
from finq.formulas import (
    period_returns,
    sharpe_ratio,
    weighted_returns,
    weighted_variance,
)
from typing import (
    Any,
    Callable,
    List,
    Dict,
    Tuple,
    Union,
    Optional,
)
10,819
def period_returns_mean(self, period: int) -> float: """ """ return np.mean(period_returns(self._data, period=period), axis=1) def daily_covariance(self) -> np.ndarray: """ """ return np.cov(period_returns(self._data, period=1), rowvar=True) def yearly_covariance(self) -> np.ndarray: """ """ return np.cov( period_returns(self._data, period=self._n_trading_days), rowvar=True ) def period_covariance(self, period: int) -> np.ndarray: """ """ return np.cov(period_returns(self._data, period=period), rowvar=True) def set_objective_function( self, function: Callable, *args: Tuple[Any, ...], ): """ """ self._objective_function = function self._objective_function_args = args def set_objective_constraints( self, *constraints, ): """ """ self._objective_constraints = [{"type": t, "fun": c} for (t, c) in constraints] def set_objective_bounds( self, bounds: Union[Tuple[int, ...], List[Tuple[int, ...]]], ): """ """ if isinstance(bounds, tuple): bounds = [bounds for _ in range(self._data.shape[0])] self._objective_bounds = bounds def sample_random_portfolios( self, n_samples: int, *, distribution: Union[str, Callable] = "lognormal", **kwargs: Dict[str, Any], ): """ """ if isinstance(distribution, str): distribution = self._weight_initializations.get(distribution, None) if distribution is None: raise ValueError( "You provided a non valid weight initialization distribution." ) portfolios = [] for i in (bar := tqdm(range(n_samples))): if i % 10: bar.set_description( f"Sampling random portfolio {i + 1} from " f"{distribution.__name__} distribution" ) portfolio = distribution(**kwargs) portfolios.append(portfolio / portfolio.sum()) self._random_portfolios = np.transpose(np.concatenate(portfolios, axis=1)) @check_valid_weights def variance(self) -> float: """ """ return weighted_variance( self._weights.T, self.daily_covariance(), ) @check_valid_weights def volatility(self) -> float: """ """ return np.sqrt( weighted_variance( self._weights.T, self.daily_covariance(), ), ) @check_valid_weights def expected_returns(self) -> float: """ """ return weighted_returns(self._weights.T, self.daily_returns_mean()) @check_valid_weights def sharpe_ratio(self) -> float: """ """ r = self.expected_returns() v = self.volatility() return sharpe_ratio(r, v, self._risk_free_rate) def verify_can_optimize(self) -> Optional[FinqError]: """ """ if self._objective_function is None:
""" MIT License Copyright (c) 2023 Wilhelm Ågren Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. File created: 2023-10-20 Last updated: 2023-11-10 """ log = logging.getLogger(__name__) class Portfolio(object): """ """ # For a full list of `scipy` optimization methods and references, see the link below. # https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html _supported_optimization_methods = ( "Nelder-Mead", "Powell", "CG", "BFGS", "Newton-CG", "L-BFGS-B", "TNC", "COBYLA", "SLSQP", "trust-constr", "dogleg", "trust-ncg", "trust-exact", "trust-krylov", ) _weight_initializations = { "lognormal": np.random.lognormal, "normal": np.random.normal, "uniform": np.random.uniform, } def __init__( self, data: Union[Dataset, List[Asset], np.ndarray, pd.DataFrame], *, weights: Optional[np.ndarray] = None, names: Optional[Union[Dict[str, str], List[str]]] = None, symbols: Optional[Union[Dict[str, str], List[str]]] = None, confidence_level: float = 0.95, risk_free_rate: float = 5e-3, n_trading_days: int = 252, objective_function: Optional[Callable] = None, objective_function_args: Tuple[Any, ...] = (), objective_bounds: Optional[List[Tuple[int, ...]]] = None, objective_constraints: Optional[Tuple[Dict, ...]] = None, ): """ """ if isinstance(data, Dataset): assets = data.as_assets() data = list(assets.values()) symbols = list(assets.keys()) if not isinstance(data, list): if names is None and symbols is None and not isinstance(data, pd.DataFrame): raise InvalidCombinationOfArgumentsError( "You need to provide the names and ticker symbols of each asset that you " "want to include in your portfolio if the data you provided is neither a " "`list` of `Asset` objects or a `pd.DataFrame`. You can also try " "providing only one of the arguments `names` and `symbols`, but then as " "a dictionary of the form `key=name` `value=symbol`." 
) if isinstance(data, list): symbols = [a.name for a in data] data = np.array([a.data for a in data]) if isinstance(data, pd.DataFrame): symbols = data.columns data = data.to_numpy().T if isinstance(names, dict): symbols = list(names.values()) names = list(names.keys()) if isinstance(symbols, dict): names = list(symbols.keys()) symbols = list(symbols.values()) self._data = data self._weights = weights self._names = names self._symbols = symbols self._confidence_level = confidence_level self._risk_free_rate = risk_free_rate self._n_trading_days = n_trading_days self._random_portfolios = None self._objective_function = objective_function self._objective_function_args = objective_function_args self._objective_bounds = objective_bounds self._objective_constraints = objective_constraints def weights_are_normalized(self) -> bool: """ """ return np.allclose(self._weights.sum(), 1.0, rtol=1e-6) def initialize_random_weights( self, distribution: Union[str, Callable], *args: Tuple[Any, ...], **kwargs: Dict[str, Any], ): """ """ if isinstance(distribution, str): distribution = self._weight_initializations.get(distribution, None) if distribution is None: raise ValueError( "You provided a non valid weight initialization distribution." ) weights = distribution(*args, **kwargs) self._weights = weights / weights.sum() def check_valid_weights(func) -> Callable: """ """ @wraps(func) def _check_valid_weights(self, *args, **kwargs) -> Optional[FinqError]: """ """ if self._weights is None: raise PortfolioNotYetOptimizedError( "Portfolio weights are `None`. Perhaps you have not yet optimized it? " ) if not self.weights_are_normalized(): raise InvalidPortfolioWeightsError( "Your portfolio weights are not normalized. Make sure to normalize them " "(they sum to one) before calculating any analytical quantities. 
" ) return func(self, *args, **kwargs) return _check_valid_weights def daily_returns(self) -> np.ndarray: """ """ return period_returns(self._data, period=1) def yearly_returns(self) -> np.ndarray: """ """ return period_returns(self._data, period=self._n_trading_days) def period_returns(self, period: int) -> np.ndarray: """ """ return period_returns(self._data, period=period) def daily_returns_mean(self) -> float: """ """ return np.mean(period_returns(self._data, period=1), axis=1) def yearly_returns_mean(self) -> float: """ """ return np.mean(period_returns(self._data, period=self._n_trading_days), axis=1) def period_returns_mean(self, period: int) -> float: """ """ return np.mean(period_returns(self._data, period=period), axis=1) def daily_covariance(self) -> np.ndarray: """ """ return np.cov(period_returns(self._data, period=1), rowvar=True) def yearly_covariance(self) -> np.ndarray: """ """ return np.cov( period_returns(self._data, period=self._n_trading_days), rowvar=True ) def period_covariance(self, period: int) -> np.ndarray: """ """ return np.cov(period_returns(self._data, period=period), rowvar=True) def set_objective_function( self, function: Callable, *args: Tuple[Any, ...], ): """ """ self._objective_function = function self._objective_function_args = args def set_objective_constraints( self, *constraints, ): """ """ self._objective_constraints = [{"type": t, "fun": c} for (t, c) in constraints] def set_objective_bounds( self, bounds: Union[Tuple[int, ...], List[Tuple[int, ...]]], ): """ """ if isinstance(bounds, tuple): bounds = [bounds for _ in range(self._data.shape[0])] self._objective_bounds = bounds def sample_random_portfolios( self, n_samples: int, *, distribution: Union[str, Callable] = "lognormal", **kwargs: Dict[str, Any], ): """ """ if isinstance(distribution, str): distribution = self._weight_initializations.get(distribution, None) if distribution is None: raise ValueError( "You provided a non valid weight initialization distribution." ) portfolios = [] for i in (bar := tqdm(range(n_samples))): if i % 10: bar.set_description( f"Sampling random portfolio {i + 1} from " f"{distribution.__name__} distribution" ) portfolio = distribution(**kwargs) portfolios.append(portfolio / portfolio.sum()) self._random_portfolios = np.transpose(np.concatenate(portfolios, axis=1)) @check_valid_weights def variance(self) -> float: """ """ return weighted_variance( self._weights.T, self.daily_covariance(), ) @check_valid_weights def volatility(self) -> float: """ """ return np.sqrt( weighted_variance( self._weights.T, self.daily_covariance(), ), ) @check_valid_weights def expected_returns(self) -> float: """ """ return weighted_returns(self._weights.T, self.daily_returns_mean()) @check_valid_weights def sharpe_ratio(self) -> float: """ """ r = self.expected_returns() v = self.volatility() return sharpe_ratio(r, v, self._risk_free_rate) def verify_can_optimize(self) -> Optional[FinqError]: """ """ if self._objective_function is None:
raise ObjectiveFunctionError
5
2023-10-09 19:02:54+00:00
16k
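Note on this row: the finq context above defines its analytics as plain formulas, period_returns(x, period) = x[:, period:] / x[:, :-period] - 1, weighted_returns(w, r) = np.dot(w, r), weighted_variance(w, cov) = np.dot(w, np.dot(cov, w.T)), and sharpe_ratio(r, v, rfr) = (r - rfr) / v. The sketch below chains them the way Portfolio.expected_returns, volatility and sharpe_ratio do; the price matrix and weights are made-up illustration data, and the 5e-3 risk-free rate only mirrors the constructor default quoted in the row.

import numpy as np

rng = np.random.default_rng(0)
prices = np.cumprod(1.0 + 0.001 * rng.standard_normal((3, 252)), axis=1)  # (n_assets, n_samples)
w = np.array([0.5, 0.3, 0.2])                                             # normalized portfolio weights

daily_returns = prices[:, 1:] / prices[:, :-1] - 1      # period_returns with period=1
mean_returns = daily_returns.mean(axis=1)               # daily_returns_mean
cov = np.cov(daily_returns, rowvar=True)                # daily_covariance

expected = np.dot(w, mean_returns)                      # weighted_returns
volatility = np.sqrt(np.dot(w, np.dot(cov, w.T)))       # sqrt of weighted_variance
sharpe = (expected - 5e-3) / volatility                 # sharpe_ratio with rfr = 5e-3

This also clarifies the row's target completion: verify_can_optimize raises ObjectiveFunctionError when no objective function has been set, since these quantities alone do not define an optimization target.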
lmb-freiburg/ldce
scripts/ldce.py
[ { "identifier": "disabled_train", "path": "sampling_helpers.py", "snippet": "def disabled_train(self, mode=True):\n \"\"\"Overwrite model.train with this function to make sure train/eval mode\n does not change anymore.\"\"\"\n return self" }, { "identifier": "get_model", "path": "sampling_helpers.py", "snippet": "def get_model(cfg_path=\"configs/latent-diffusion/cin256-v2.yaml\", ckpt_path=\"models/ldm/cin256-v2/model.ckpt\"):\n config = OmegaConf.load(cfg_path)\n model = load_model_from_config(config, ckpt_path)\n return model" }, { "identifier": "_unmap_img", "path": "sampling_helpers.py", "snippet": "def _unmap_img(x, from_image_net_dist=False):\n \"\"\"\n from 0 to 1 to -1 to 1\n \"\"\"\n\n return 2. * x - 1" }, { "identifier": "generate_samples", "path": "sampling_helpers.py", "snippet": "def generate_samples(\n model, \n sampler, \n target_y, \n ddim_steps, \n scale, \n init_image=None, \n t_enc=None,\n init_latent=None, \n ccdddim=False, \n ddim_eta=0., \n latent_t_0=True, \n prompts: list = None,\n seed: int = 0\n):\n torch.cuda.empty_cache()\n \n all_samples = []\n all_probs = []\n all_videos = []\n all_masks = []\n all_cgs = []\n\n with torch.no_grad():\n with model.ema_scope():\n tic = time.time()\n print(f\"rendering target classes '{target_y}' in {len(sampler.ddim_timesteps)} or {ddim_steps} steps and using s={scale:.2f}.\")\n batch_size = target_y.shape[0]\n if \"class_label\" == model.cond_stage_key: # class-conditional\n uc = model.get_learned_conditioning({model.cond_stage_key: torch.tensor(batch_size * [1000]).to(model.device)})\n c = model.get_learned_conditioning({model.cond_stage_key: target_y.to(model.device)})\n elif \"txt\" == model.cond_stage_key: # text-conditional\n uc = model.get_learned_conditioning(batch_size * [\"\"])\n if prompts is None:\n raise ValueError(\"Prompts are not defined!\")\n c = model.get_learned_conditioning(prompts)\n else:\n raise NotImplementedError\n \n if init_latent is not None:\n if seed!=-1:\n noises_per_batch = []\n for b in range(batch_size):\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n torch.cuda.manual_seed_all(seed)\n noises_per_batch.append(torch.randn_like(init_latent[b]))\n noise = torch.stack(noises_per_batch, dim=0)\n else:\n noise = None\n z_enc = sampler.stochastic_encode(init_latent, torch.tensor([t_enc] * (batch_size)).to(\n init_latent.device), noise=noise) if not latent_t_0 else init_latent\n\n if seed!=-1:\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n torch.cuda.manual_seed_all(seed)\n\n # decode it\n if ccdddim:\n out = sampler.decode(\n z_enc, \n c, \n t_enc, \n unconditional_guidance_scale=scale,\n unconditional_conditioning=uc, \n y=target_y.to(model.device), \n latent_t_0=latent_t_0,\n )\n samples = out[\"x_dec\"]\n prob = out[\"prob\"]\n vid = out[\"video\"]\n mask = out[\"mask\"]\n cg = out[\"concensus_regions\"]\n\n else:\n samples = sampler.decode(z_enc, c, t_enc, unconditional_guidance_scale=scale,\n unconditional_conditioning=uc)\n\n x_samples = model.decode_first_stage(samples)\n x_samples_ddim = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)\n cat_samples = x_samples_ddim #torch.cat([init_image[:1], x_samples_ddim], dim=0)\n else:\n\n samples_ddim, _ = sampler.sample(S=ddim_steps,\n conditioning=c,\n batch_size=batch_size,\n shape=[3, 64, 64],\n verbose=False,\n unconditional_guidance_scale=scale,\n unconditional_conditioning=uc,\n eta=ddim_eta)\n\n x_samples_ddim = model.decode_first_stage(samples_ddim)\n x_samples_ddim = 
torch.clamp((x_samples_ddim + 1.0) / 2.0,\n min=0.0, max=1.0)\n cat_samples = x_samples_ddim\n\n all_samples.append(cat_samples)\n all_probs.append(prob) if ccdddim and prob is not None else None\n all_videos.append(vid) if ccdddim and vid is not None else None\n all_masks.append(mask) if ccdddim and mask is not None else None\n all_cgs.append(cg) if ccdddim and cg is not None else None\n tac = time.time()\n\n out = {}\n out[\"samples\"] = all_samples\n out[\"probs\"] = all_probs if len(all_probs) > 0 else None\n out[\"videos\"] = all_videos if len(all_videos) > 0 else None\n out[\"masks\"] = all_masks if len(all_masks) > 0 else None\n out[\"cgs\"] = all_cgs if len(all_cgs) > 0 else None\n \n return out" }, { "identifier": "load_model_hf", "path": "sampling_helpers.py", "snippet": "def load_model_hf(repo_id, filename, dir, ckpt_config_filename, device='cpu'):\n cache_config_file = hf_hub_download(repo_id=repo_id, filename=ckpt_config_filename)\n\n args = SLConfig.fromfile(cache_config_file)\n args.device = device\n model = build_model(args)\n\n cache_file = hf_hub_download(repo_id=repo_id, filename=filename, cache_dir=dir)\n checkpoint = torch.load(cache_file, map_location='cpu')\n log = model.load_state_dict(clean_state_dict(checkpoint['model']), strict=False)\n print(\"Model loaded from {} \\n => {}\".format(cache_file, log))\n _ = model.eval()\n return model.to(device)" }, { "identifier": "CCMDDIMSampler", "path": "ldm/models/diffusion/cc_ddim.py", "snippet": "class CCMDDIMSampler(object):\n def __init__(self, model, classifier, model_type=\"latent\", schedule=\"linear\", guidance=\"free\", lp_custom=False,\n deg_cone_projection=10., denoise_dist_input=True, classifier_lambda=1, dist_lambda=0.15,\n enforce_same_norms=True, seg_model=None, detect_model=None, masked_guidance=False,\n backprop_diffusion=True, log_backprop_gradients: bool = False, mask_alpha = 5., cone_projection_type= 'default', self_recurrence=0, classifier_wrapper: bool = True, record_intermediate_results:bool=False, verbose:bool=True,**kwargs):\n\n super().__init__()\n self.model_type = model_type\n self.lp_custom = lp_custom\n self.images = []\n self.probs = []\n self.classifier_lambda = classifier_lambda\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n self.classifier = classifier\n self.guidance = guidance\n self.backprop_diffusion = backprop_diffusion\n self.log_backprop_gradients = log_backprop_gradients\n # self.projected_counterfactuals = projected_counterfactuals\n self.deg_cone_projection = deg_cone_projection\n self.cone_projection_type = cone_projection_type\n self.denoise_dist_input = denoise_dist_input\n self.dist_lambda = dist_lambda\n self.enforce_same_norms = enforce_same_norms\n self.seg_model = seg_model\n self.masked_guidance = masked_guidance\n self.mask_alpha = mask_alpha\n self.self_recurrence = self_recurrence\n self.classifier_wrapper = classifier_wrapper\n self.record_intermediate_results = record_intermediate_results\n self.verbose = verbose\n\n self.init_images = None\n self.init_labels = None \n self.mask = None\n self.concensus_regions = []\n \n self.detect_model = detect_model\n self.classification_criterion = torch.nn.CrossEntropyLoss()\n self.binary_classification_criterion = torch.nn.BCEWithLogitsLoss()\n \n self.dino_pipeline = False\n if isinstance(self.lp_custom, str) and \"dino_\" in self.lp_custom:\n self.distance_criterion = DinoLoss(dino=torch.hub.load('facebookresearch/dino:main', 'dino_vitb16').eval(), 
loss_identifier=self.lp_custom.split(\"_\")[-1])\n self.dino_init_features = None\n self.dino_pipeline = True\n elif isinstance(self.lp_custom, int):\n if self.lp_custom == 1:\n self.distance_criterion = torch.nn.L1Loss(reduction='sum')\n elif self.lp_custom == 2:\n self.distance_criterion = torch.nn.MSELoss(reduction='sum')\n else:\n raise NotImplementedError\n else:\n raise NotImplementedError\n\n def get_classifier_dist(self, x, t=None):\n \"\"\"\n Create a distribution over the classifier output space\n Args:\n x: input image for which to create the distribution over the classifier output space range [-1, 1]\n\n Returns:\n dist: torch distribution over the classifier output space\n\n \"\"\"\n x = tf.center_crop(x, 224)\n x = normalize(_map_img(x))\n logit = self.classifier(x) # (TODO) add option for t here\n dist = torchd.independent.Independent(OneHotDist(logit, validate_args = False), 0) # 0 here is the batch dimension, so event_shape is (num_classes, )\n return dist\n\n def get_classifier_logits(self, x, t=None):\n \"\"\"\n Returns classifier logits\n Args:\n x: input image for which to create the prediction\n\n Returns:\n logits: logits of output layer of target model\n\n \"\"\"\n x = _map_img(x)\n if not self.classifier_wrapper: # only works for ImageNet!\n x = tf.center_crop(x, 224)\n x = normalize(x)\n return self.classifier(x)\n\n def get_dino_features(self, x, device):\n x = normalize(_map_img(tf.center_crop(x, output_size=224)))\n return self.distance_criterion.dino(x.to(device))\n\n def get_mask_clip_seg(self):\n \"\"\"\n this function returns a negative mask given by a segmentation model for the region of interest\n values are higher outside the region of interest\n \"\"\"\n if self.mask is not None:\n return self.mask\n\n prompts = []\n\n for l in self.init_labels:\n prompts.append(re.sub(r'\\b(\\w)', lambda m: m.group(1).upper(), i2h[l]))\n\n with torch.no_grad():\n img_to_seg = F.interpolate(normalize(self.init_images), size=(352, 352), mode='bilinear',\n align_corners=False).to(self.init_images.device)\n preds = self.seg_model(img_to_seg, prompts)[0]\n preds = F.interpolate(preds, size=self.init_images.shape[-2:], mode='bilinear', align_corners=False)\n preds = torch.sigmoid(preds) # torch.softmax(preds.view(preds.shape[0], -1), dim=1).view(*preds.shape)\n # penalty = 1-preds\n preds = (preds - preds.min()) / (preds.max() - preds.min())\n preds = torch.sigmoid(self.mask_alpha*2*(preds-0.5))\n self.mask = preds.to(self.init_images.device)\n return self.mask\n\n def get_mask(self):\n \"\"\"\n this function returns a negative mask given by a segmentation model for the region of interest\n values are higher outside the region of interest\n \"\"\"\n\n if self.mask is not None:\n return self.mask\n\n with torch.no_grad():\n print(\"input range\", self.init_images.min(), self.init_images.max())\n image_int8 = (self.init_images[0].permute(1, 2, 0).cpu().numpy() * 255.).astype(np.uint8)\n # detected_boxes = detect(image, text_prompt=i2h[label], model=groundingdino_model, image_source=image_image)\n detected_boxes = detect(normalize(self.init_images[0]).squeeze(),\n text_prompt=i2h[self.init_labels[0]].split(',')[0],\n model=self.detect_model) # , image_source=image_int8)\n segmented_frame_masks = segment(image_int8, self.seg_model, boxes=detected_boxes)\n preds = torch.any(segmented_frame_masks, dim=0)\n preds = preds.unsqueeze(0).repeat(self.init_images.shape[0], *(1,) * len(preds.shape))\n # print(\"preds range after first seg \", preds.min(), preds.max())\n self.mask = 
preds.to(self.init_images.device)\n\n return self.mask\n\n def get_output(self, x, t, c, index, unconditional_conditioning, use_original_steps=True, quantize_denoised=True,\n return_decoded=False, return_pred_latent_x0=False):\n b, device = x.shape[0], x.device\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n with torch.enable_grad() if self.backprop_diffusion else torch.no_grad():\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n\n if return_decoded:\n # getting the original denoised image\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)\n # current prediction for x_0\n # get the original image with range [0, 1] if it is in latent space\n pred_latent_x0 = (x - sqrt_one_minus_at * e_t_uncond) / a_t.sqrt() # e_t - > e_t_uncond\n if quantize_denoised:\n pred_latent_x0, _, *_ = self.model.first_stage_model.quantize(pred_latent_x0)\n\n pred_x0 = self.model.differentiable_decode_first_stage(\n pred_latent_x0) # if self.model_type == \"latent\" else pred_latent_x0\n # pred_x0 = torch.clamp((pred_x0 + 1.0) / 2.0, min=0.0, max=1.0)\n \n if return_pred_latent_x0:\n return e_t_uncond, e_t, pred_x0, pred_latent_x0\n else:\n return e_t_uncond, e_t, pred_x0\n else:\n return e_t_uncond, e_t\n\n def conditional_score(self, x, t, c, index, use_original_steps, quantize_denoised, unconditional_guidance_scale=1.,\n unconditional_conditioning=None, y=None):\n \"\"\"\n\n Args:\n x: input image\n t: time step\n c: conditioning\n index: index for the schedule\n use_original_steps: whether to use the original steps\n quantize_denoised: whether to quantize the denoised image\n unconditional_guidance_scale: scale for the unconditional guidance\n unconditional_conditioning: unconditional conditioning\n y: target class\n\n\n Returns:\n e_t: score after conditioning\n\n \"\"\"\n b, *_, device = *x.shape, x.device\n x = x.detach() # .requires_grad_()\n # x.requires_grad = True\n prob_best_class = None\n mask_guidance = None\n\n ## check if gradient tracking is on for x\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c)\n return e_t\n\n # print(\"check gradient tracking onf e \", e_t.requires_grad)\n if self.guidance == \"free\":\n e_t_uncond, e_t, pred_x0 = self.get_output(x, t, c, index, unconditional_conditioning, use_original_steps,\n quantize_denoised, return_decoded=True)\n\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n return e_t\n\n # print(\"check gradient tracking onf e \", e_t.requires_grad)\n score_out = torch.zeros_like(x)\n\n with torch.enable_grad():\n x_noise = x.detach().requires_grad_()\n ret_vals = self.get_output(x_noise, t, c, index, unconditional_conditioning,\n use_original_steps, quantize_denoised=quantize_denoised,\n return_decoded=True, return_pred_latent_x0=self.log_backprop_gradients)\n if self.log_backprop_gradients:\n e_t_uncond, e_t, pred_x0, pred_latent_x0 = ret_vals\n else:\n e_t_uncond, e_t, pred_x0 = ret_vals\n\n with torch.no_grad():\n if isinstance(self.lp_custom, str) and \"dino_\" in self.lp_custom: # retain_graph causes cuda oom issues for dino distance regularizer...\n with torch.enable_grad():\n 
pred_x0_0to1 = torch.clamp(_map_img(pred_x0), min=0.0, max=1.0)\n lp_dist = self.distance_criterion(pred_x0_0to1, self.dino_init_features.to(x.device).detach())\n lp_grad = torch.autograd.grad(lp_dist.mean(), x_noise, retain_graph=False)[0]\n elif self.lp_custom:\n with torch.enable_grad():\n pred_x0_0to1 = torch.clamp(_map_img(pred_x0), min=0.0, max=1.0)\n lp_dist = self.distance_criterion(pred_x0_0to1, self.init_images.to(x.device))\n lp_grad = torch.autograd.grad(lp_dist.mean(), x_noise, retain_graph=True)[0]\n \n if self.classifier_lambda != 0:\n with torch.enable_grad():\n if isinstance(self.lp_custom, str) and \"dino_\" in self.lp_custom:\n x_noise = x.detach().requires_grad_()\n ret_vals = self.get_output(x_noise, t, c, index, unconditional_conditioning,\n use_original_steps, quantize_denoised=quantize_denoised,\n return_decoded=True, return_pred_latent_x0=self.log_backprop_gradients)\n if self.log_backprop_gradients:\n e_t_uncond, e_t, pred_x0, pred_latent_x0 = ret_vals\n else:\n e_t_uncond, e_t, pred_x0 = ret_vals\n pred_logits = self.get_classifier_logits(pred_x0)\n if len(pred_logits.shape) == 2: # multi-class\n log_probs = torch.nn.functional.log_softmax(pred_logits, dim=-1)\n log_probs = log_probs[range(log_probs.size(0)), y.view(-1)]\n prob_best_class = torch.exp(log_probs).detach()\n else: # binary\n loss = self.binary_classification_criterion(pred_logits, y)\n loss *= -1 # minimize this\n log_probs = loss\n prob_best_class = pred_logits.sigmoid().detach()\n\n if self.log_backprop_gradients: pred_latent_x0.retain_grad()\n\n if self.dino_pipeline:\n grad_classifier = torch.autograd.grad(log_probs.sum(), x_noise, retain_graph=False)[0]\n else:\n grad_classifier = torch.autograd.grad(log_probs.sum(), x_noise, retain_graph=True)[0]\n # grad_classifier = torch.autograd.grad(log_probs.sum(), x_noise, retain_graph=True)[0]\n # grad_classifier2 = torch.autograd.grad(log_probs[0].sum(), x_noise, retain_graph=False)[0]\n\n if self.log_backprop_gradients:\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_t_sqrt = a_t.sqrt()\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)\n grad_pred_latent_x0 = pred_latent_x0.grad.data\n grad_unet_wrt_zt = (grad_classifier*a_t_sqrt/grad_pred_latent_x0 - 1)*(-1/sqrt_one_minus_at)\n\n cossim = torch.nn.CosineSimilarity()\n cossim_wpre = cossim(grad_classifier.view(2, -1), grad_pred_latent_x0.view(2, -1))\n \n print(torch.norm(grad_classifier, dim=(2,3)), torch.norm(grad_pred_latent_x0, dim=(2,3)), torch.norm(grad_unet_wrt_zt, dim=(2,3)))\n print(cossim_wpre)\n\n # assert e_t_uncond.requires_grad == True and e_t.requires_grad == True, \"e_t_uncond and e_t should require gradients\"\n\n # if self.guidance == \"projected\":\n implicit_classifier_score = (e_t - e_t_uncond) # .detach()\n # check gradient tracking on implicit_classifier_score\n assert implicit_classifier_score.requires_grad == False, \"implicit_classifier_score requires grad\"\n\n if self.lp_custom or self.classifier_lambda != 0:\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n\n if self.classifier_lambda != 0:\n classifier_score = -1 * grad_classifier * (1 - a_t).sqrt()\n assert classifier_score.requires_grad == False, 
\"classifier_score requires grad\"\n # project the gradient of the classifier on the implicit classifier\n\n\n projection_fn = cone_project if self.cone_projection_type == \"default\" else cone_project_chuncked\n projection_fn = cone_project_chuncked_zero if \"zero\" in self.cone_projection_type else projection_fn\n \n \n proj_out = projection_fn(implicit_classifier_score.view(x.shape[0], -1),\n classifier_score.view(x.shape[0], -1),\n self.deg_cone_projection,\n orig_shp=implicit_classifier_score.shape) \\\n if self.guidance == \"projected\" else classifier_score\n \n classifier_score = proj_out if self.cone_projection_type == \"default\" else proj_out[0].view_as(classifier_score)\n concensus_region = proj_out[1].unsqueeze(1) if self.cone_projection_type == \"binning\" else None\n #print(classifier_score.shape, concensus_region.shape)\n if self.enforce_same_norms:\n score_, norm_ = _renormalize_gradient(classifier_score,\n implicit_classifier_score) # e_t_uncond (AWAREE!!)\n classifier_score = self.classifier_lambda * score_\n\n else:\n classifier_score *= self.classifier_lambda\n\n score_out += classifier_score\n\n # distance gradients\n if self.lp_custom:\n\n lp_score = -1 * lp_grad * (1 - a_t).sqrt()\n\n if self.enforce_same_norms:\n score_, norm_ = _renormalize_gradient(lp_score,\n implicit_classifier_score)\n lp_score = self.dist_lambda * score_\n\n else:\n\n lp_score *= self.dist_lambda\n\n score_out -= lp_score\n\n e_t = e_t_uncond + unconditional_guidance_scale * score_out # (1 - a_t).sqrt() * grad_out\n\n \n if self.record_intermediate_results:\n # adding images to create a gif\n pred_x0_copy = pred_x0.clone().detach()\n img = torch.clamp(_map_img(pred_x0_copy), min=0.0, max=1.0)\n #img = torch.permute(img, (1, 2, 0, 3)).reshape((img.shape[1], img.shape[2], -1))\n\n self.images.append(img.detach().cpu())\n if self.classifier_lambda != 0 and self.cone_projection_type == \"binning\":\n self.concensus_regions.append(concensus_region.detach().cpu())\n \n if prob_best_class is not None:\n self.probs.append(prob_best_class.detach().cpu())\n\n return e_t\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n #pass\n # TODO: this is a hack to make it work on CPU\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose)\n #print(\"DDIM timesteps: \", self.ddim_timesteps, \"with length: \", len(self.ddim_timesteps))\n #print all input parameters\n #print(\"DDIM parameters: \", self.ddim_timesteps, ddim_discretize, ddim_eta)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. 
- alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta, verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, ):\n\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 
'pred_x0': [img]}\n time_range = reversed(range(0, timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. - mask) * img\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, y=None):\n b, *_, device = *x.shape, x.device\n\n e_t = self.conditional_score(x=x, c=c, t=t, index=index, use_original_steps=use_original_steps,\n quantize_denoised=quantize_denoised,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning, y=y)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t ** 2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas).to(x0.device)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas.to(x0.device)\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, y=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, latent_t_0=False):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n if self.masked_guidance:\n print(\"### Getting the mask ###\")\n mask = self.get_mask()\n mask = F.interpolate(mask.to(torch.uint8), size=x_latent.shape[-2:])\n # mask = self.get_mask()\n # mask = F.interpolate(mask, size=x_latent.shape[-2:], mode='bilinear', align_corners=True)\n # mask = (mask - mask.min()) / (mask.max() - mask.min())\n # mask[mask < 0.5] = 0.\n # mask[mask >= 0.5] = 1.\n\n if self.verbose:\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n else:\n iterator = range(time_range)\n\n # if latent_t_0:\n # x_orig = x_latent\n # x_dec = self.stochastic_encode(x_latent.clone(),\n # torch.tensor([t_start] * (x_latent.shape[0])).to(x_latent.device))\n # else:\n x_dec = x_latent if not latent_t_0 else self.stochastic_encode(x_latent.clone(), torch.tensor([t_start] * (x_latent.shape[0])).to(x_latent.device))\n for i, step in enumerate(iterator):\n tic = time.time()\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n\n if self.masked_guidance and latent_t_0:\n #print(\"blending with original image\")\n img_orig = self.model.q_sample(x_latent.clone(), ts)\n x_dec = img_orig * (1. 
- mask) + (mask) * x_dec\n\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning, y=y)\n x_dec = x_dec.detach()\n for j in range(self.self_recurrence):\n print(\"self recurrence\")\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps, unconditional_guidance_scale = 1)\n\n #workaround for long running time\n elapsed_time = time.time() - tic\n if elapsed_time > 6:\n print(f\"Iteration time {elapsed_time} exceeded limit 6 secs, terminating program...\")\n print(\"x_dec device: \", x_dec.device)\n sys.exit(1) # Terminate the program with exit code 1 (indicating an error) \n \n out = {}\n out['x_dec'] = x_dec\n out['video'] = torch.stack(self.images, dim=1) if len(self.images) != 0 else None\n out[\"mask\"] = self.mask.to(torch.float32) if self.mask is not None else None\n # print(f\"Video shape: {out['video'].shape}\")\n #out['prob'] = self.probs[-1].item() if len(self.probs) != 0 else None\n out['prob'] = self.probs[-1].detach().cpu().numpy() if len(self.probs) != 0 else None\n out['concensus_regions'] = torch.stack(self.concensus_regions, dim=1) if len(self.concensus_regions) != 0 else None\n #print(out['concensus_regions'].shape, (out[\"concensus_regions\"]>200).to(torch.float32).mean())\n self.images = []\n self.probs = []\n \n self.concensus_regions = []\n self.mask = None\n\n return out" }, { "identifier": "name_map", "path": "data/imagenet_classnames.py", "snippet": "" }, { "identifier": "DecisionDensenetModel", "path": "utils/DecisionDensenetModel.py", "snippet": "class DecisionDensenetModel(nn.Module):\n\n def __init__(self, num_classes=40, pretrained=False, query_label=-1):\n super().__init__()\n self.feat_extract = DenseNet121(pretrained=pretrained)\n self.classifier = nn.Linear(self.feat_extract.output_size, num_classes)\n self.query_label = query_label\n\n def forward(self, x, before_sigmoid=True):\n\n x = self.feat_extract(x)\n x = self.classifier(x)\n if not before_sigmoid:\n x = torch.sigmoid(x)\n return x[:, self.query_label]" }, { "identifier": "Normalizer", "path": "utils/preprocessor.py", "snippet": "class Normalizer(torch.nn.Module):\n '''\n normalizing module. Useful for computing the gradient\n to a x image (x in [0, 1]) when using a classifier with\n different normalization inputs (i.e. 
f((x - mu) / sigma))\n '''\n def __init__(self, classifier,\n mu=[0.485, 0.456, 0.406],\n sigma=[0.229, 0.224, 0.225]):\n super().__init__()\n self.classifier = classifier\n self.register_buffer('mu', torch.tensor(mu).view(1, -1, 1, 1))\n self.register_buffer('sigma', torch.tensor(sigma).view(1, -1, 1, 1))\n\n def forward(self, x):\n x = (x - self.mu) / self.sigma\n return self.classifier(x)" }, { "identifier": "CropAndNormalizer", "path": "utils/preprocessor.py", "snippet": "class CropAndNormalizer(torch.nn.Module):\n def __init__(self, classifier, crop_size: int=224, mu=[0.485, 0.456, 0.406], sigma=[0.229, 0.224, 0.225]) -> None:\n super().__init__()\n self.classifier = classifier\n self.crop_size = crop_size\n self.center_crop = torchvision.transforms.CenterCrop(crop_size)\n self.register_buffer('mu', torch.tensor(mu).view(1, -1, 1, 1))\n self.register_buffer('sigma', torch.tensor(sigma).view(1, -1, 1, 1))\n\n def forward(self, x):\n # assumes x in [0, 1]!\n # x = F.center_crop(x, self.crop_size)\n x = self.center_crop(x)\n x = (x - self.mu) / self.sigma\n return self.classifier(x)" }, { "identifier": "ResizeAndNormalizer", "path": "utils/preprocessor.py", "snippet": "class ResizeAndNormalizer(torch.nn.Module):\n def __init__(self, classifier, resolution: tuple=(224, 224), mu=[0.485, 0.456, 0.406], sigma=[0.229, 0.224, 0.225]) -> None:\n super().__init__()\n self.classifier = classifier\n self.resolution = resolution\n self.resize = torchvision.transforms.Resize(resolution)\n self.register_buffer('mu', torch.tensor(mu).view(1, -1, 1, 1))\n self.register_buffer('sigma', torch.tensor(sigma).view(1, -1, 1, 1))\n\n def forward(self, x):\n # assumes x in [0, 1]!\n x = self.resize(x)\n x = (x - self.mu) / self.sigma\n return self.classifier(x)" }, { "identifier": "GenericPreprocessing", "path": "utils/preprocessor.py", "snippet": "class GenericPreprocessing(torch.nn.Module):\n def __init__(self, classifier, preprocessor) -> None:\n super().__init__()\n self.classifier = classifier\n self.preprocessor = preprocessor\n\n def forward(self, x):\n # assumes x in [0, 1]!\n x = self.preprocessor(x)\n return self.classifier(x)" }, { "identifier": "Crop", "path": "utils/preprocessor.py", "snippet": "class Crop(torch.nn.Module):\n def __init__(self, classifier, crop_size: int=224) -> None:\n super().__init__()\n self.classifier = classifier\n self.crop_size = crop_size\n self.center_crop = torchvision.transforms.CenterCrop(crop_size)\n\n def forward(self, x):\n # assumes x in [0, 1]!\n x = self.center_crop(x)\n return self.classifier(x)" }, { "identifier": "VisionLanguageWrapper", "path": "utils/vision_language_wrapper.py", "snippet": "class VisionLanguageWrapper(nn.Module):\n def __init__(self, model, tokenizer, prompts) -> None:\n super().__init__()\n self.model = model\n self.tokenizer = tokenizer\n self.prompts = prompts\n\n device = next(self.model.parameters()).device\n\n text = tokenizer(prompts)\n with torch.no_grad():\n self.text_features = model.encode_text(text.to(device))\n self.text_features = self.text_features / self.text_features.norm(dim=-1, keepdim=True)\n\n def forward(self, x):\n image_features = self.model.encode_image(x)\n image_features = image_features / image_features.norm(dim=-1, keepdim=True)\n logits = 100.0 * image_features @ self.text_features.T\n return logits" }, { "identifier": "MadryNet", "path": "utils/madry_net.py", "snippet": "def MadryNet(ckpt, device):\n norm = \"l2\"\n model = load_model(\n modelname=\"Engstrom2019Robustness\", norm=norm, device=device\n )\n 
state_dict = torch.load(ckpt, map_location=\"cpu\")\n model.model.load_state_dict(state_dict, strict=True)\n return model" }, { "identifier": "LinearClassifier", "path": "utils/dino_linear.py", "snippet": "class LinearClassifier(nn.Module):\n \"\"\"Linear layer to train on top of frozen features\"\"\"\n def __init__(self, dim, num_labels=1000):\n super(LinearClassifier, self).__init__()\n self.num_labels = num_labels\n self.linear = nn.Linear(dim, num_labels)\n self.linear.weight.data.normal_(mean=0.0, std=0.01)\n self.linear.bias.data.zero_()\n\n def forward(self, x):\n # flatten\n x = x.view(x.size(0), -1)\n\n # linear layer\n return self.linear(x)" }, { "identifier": "DINOLinear", "path": "utils/dino_linear.py", "snippet": "class DINOLinear(nn.Module):\n def __init__(self, dino, linear_classifier) -> None:\n super().__init__()\n self.dino = dino\n self.linear = linear_classifier\n \n def forward(self, x):\n x = self.dino(x)\n return self.linear(x)" } ]
import argparse import os import psutil import yaml import copy import random import matplotlib.pyplot as plt import numpy as np import pathlib import torch import hydra import wandb import torchvision import json import sys import regex as re import open_clip from contextlib import nullcontext from torch import autocast from omegaconf import OmegaConf, open_dict from hydra.utils import instantiate from omegaconf import DictConfig, OmegaConf from torchvision import transforms, datasets from torchvision.utils import save_image from sampling_helpers import disabled_train, get_model, _unmap_img, generate_samples from sampling_helpers import load_model_hf from ldm import * from ldm.models.diffusion.cc_ddim import CCMDDIMSampler from data.imagenet_classnames import name_map, openai_imagenet_classes from utils.DecisionDensenetModel import DecisionDensenetModel from utils.preprocessor import Normalizer, CropAndNormalizer, ResizeAndNormalizer, GenericPreprocessing, Crop from utils.vision_language_wrapper import VisionLanguageWrapper from utils.madry_net import MadryNet from utils.dino_linear import LinearClassifier, DINOLinear
11984
torch.backends.cuda.matmul.allow_tf32 = True # torch.backends.cudnn.benchmark = True try: except: print("Install OpenClip via: pip install open_clip_torch") def set_seed(seed: int = 0): torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) torch.cuda.manual_seed_all(seed) def blockPrint(): sys.stdout = open(os.devnull, 'w') def get_classifier(cfg, device): if "ImageNet" in cfg.data._target_: classifier_name = cfg.classifier_model.name if classifier_name == "robust_resnet50": classifier_model = MadryNet(cfg.classifier_model.ckpt, device) if "classifier_wrapper" in cfg.classifier_model and cfg.classifier_model.classifier_wrapper: classifier_model = Crop(classifier_model) else: classifier_model = getattr(torchvision.models, classifier_name)(pretrained=True) if "classifier_wrapper" in cfg.classifier_model and cfg.classifier_model.classifier_wrapper: classifier_model = CropAndNormalizer(classifier_model) elif "CelebAHQDataset" in cfg.data._target_: assert cfg.data.query_label in [20, 31, 39], 'Query label MUST be 20 (Gender), 31 (Smile), or 39 (Age) for CelebAHQ' ql = 0 if cfg.data.query_label in [31, 39]: ql = 1 if cfg.data.query_label == 31 else 2 classifier_model = DecisionDensenetModel(3, pretrained=False, query_label=ql) classifier_model.load_state_dict(torch.load(cfg.classifier_model.classifier_path, map_location='cpu')['model_state_dict']) if cfg.classifier_model.classifier_wrapper: classifier_model = Normalizer( classifier_model, [0.5] * 3, [0.5] * 3 ) elif "Flowers102" in cfg.data._target_: # fine-tuned Dino ViT B/8: https://arxiv.org/pdf/2104.14294.pdf dino = torch.hub.load('facebookresearch/dino:main', 'dino_vits8').to(device).eval() dim = dino.embed_dim linear_classifier = LinearClassifier(dim*cfg.classifier_model.n_last_blocks, 102) linear_classifier.load_state_dict(torch.load(cfg.classifier_model.classifier_path, map_location="cpu"), strict=True) linear_classifier = linear_classifier.eval().to(device)
torch.backends.cuda.matmul.allow_tf32 = True # torch.backends.cudnn.benchmark = True try: except: print("Install OpenClip via: pip install open_clip_torch") def set_seed(seed: int = 0): torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) torch.cuda.manual_seed_all(seed) def blockPrint(): sys.stdout = open(os.devnull, 'w') def get_classifier(cfg, device): if "ImageNet" in cfg.data._target_: classifier_name = cfg.classifier_model.name if classifier_name == "robust_resnet50": classifier_model = MadryNet(cfg.classifier_model.ckpt, device) if "classifier_wrapper" in cfg.classifier_model and cfg.classifier_model.classifier_wrapper: classifier_model = Crop(classifier_model) else: classifier_model = getattr(torchvision.models, classifier_name)(pretrained=True) if "classifier_wrapper" in cfg.classifier_model and cfg.classifier_model.classifier_wrapper: classifier_model = CropAndNormalizer(classifier_model) elif "CelebAHQDataset" in cfg.data._target_: assert cfg.data.query_label in [20, 31, 39], 'Query label MUST be 20 (Gender), 31 (Smile), or 39 (Age) for CelebAHQ' ql = 0 if cfg.data.query_label in [31, 39]: ql = 1 if cfg.data.query_label == 31 else 2 classifier_model = DecisionDensenetModel(3, pretrained=False, query_label=ql) classifier_model.load_state_dict(torch.load(cfg.classifier_model.classifier_path, map_location='cpu')['model_state_dict']) if cfg.classifier_model.classifier_wrapper: classifier_model = Normalizer( classifier_model, [0.5] * 3, [0.5] * 3 ) elif "Flowers102" in cfg.data._target_: # fine-tuned Dino ViT B/8: https://arxiv.org/pdf/2104.14294.pdf dino = torch.hub.load('facebookresearch/dino:main', 'dino_vits8').to(device).eval() dim = dino.embed_dim linear_classifier = LinearClassifier(dim*cfg.classifier_model.n_last_blocks, 102) linear_classifier.load_state_dict(torch.load(cfg.classifier_model.classifier_path, map_location="cpu"), strict=True) linear_classifier = linear_classifier.eval().to(device)
classifier_model = DINOLinear(dino, linear_classifier)
16
2023-10-10 09:40:10+00:00
16k
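
Each row of this dump pairs an in-repo context list (entries with "identifier", "path", and "snippet" keys, as visible above) with an import block, a cropped code prefix, the single next line a completion model is expected to produce, a token count, the index of the gold context snippet, a timestamp, and a context-length level; the bare values 16, 2023-10-10 09:40:10+00:00 and 16k close the row above before the next repository (cpuimage/minSDXLTF) begins. Below is a minimal sketch of how such a row could be assembled into a prompt and scored for exact match; the helper names (build_prompt, is_exact_match), the remaining dictionary keys, and the tiny inline record are assumptions for illustration only, not part of the corpus.

# Illustrative sketch only (not a dataset row). It assumes a record layout with
# a "context" list of {identifier, path, snippet} entries as shown above; the
# other key names are assumed for illustration.

def build_prompt(record: dict) -> str:
    """Concatenate the gold context snippet, the import block, and the code prefix."""
    gold = record["context"][record["gold_snippet_index"]]
    return (
        f"# retrieved from {gold['path']}\n"
        f"{gold['snippet']}\n\n"
        f"{record['import_statement']}\n\n"
        f"{record['cropped_code']}"
    )

def is_exact_match(prediction: str, record: dict) -> bool:
    """Check a predicted completion against the reference next line."""
    return prediction.strip() == record["next_line"].strip()

# Toy stand-in record, heavily abbreviated from the row above.
toy_record = {
    "context": [
        {
            "identifier": "DINOLinear",
            "path": "utils/dino_linear.py",
            "snippet": "class DINOLinear(nn.Module): ...",
        }
    ],
    "import_statement": "from utils.dino_linear import LinearClassifier, DINOLinear",
    "cropped_code": "linear_classifier = linear_classifier.eval().to(device)",
    "next_line": "classifier_model = DINOLinear(dino, linear_classifier)",
    "gold_snippet_index": 0,
}

print(build_prompt(toy_record))
print(is_exact_match("classifier_model = DINOLinear(dino, linear_classifier)", toy_record))
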
cpuimage/minSDXLTF
stable_diffusion_xl/stable_diffusion_xl.py
[ { "identifier": "SimpleTokenizer", "path": "stable_diffusion_xl/clip_tokenizer.py", "snippet": "class SimpleTokenizer:\n def __init__(self, bpe_path=None):\n bpe_path = bpe_path or tf.keras.utils.get_file(\n \"bpe_simple_vocab_16e6.txt.gz\",\n \"https://github.com/openai/CLIP/blob/main/clip/bpe_simple_vocab_16e6.txt.gz?raw=true\", # noqa: E501\n file_hash=\"924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a\", # noqa: E501\n )\n self.byte_encoder = bytes_to_unicode()\n self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}\n merges = gzip.open(bpe_path).read().decode(\"utf-8\").split(\"\\n\")\n merges = merges[1: 49152 - 256 - 2 + 1]\n merges = [tuple(merge.split()) for merge in merges]\n vocab = list(bytes_to_unicode().values())\n vocab = vocab + [v + \"</w>\" for v in vocab]\n for merge in merges:\n vocab.append(\"\".join(merge))\n vocab.extend([\"<|startoftext|>\", \"<|endoftext|>\"])\n self.vocab = vocab\n self.encoder = self._create_encoder(self.vocab)\n self.decoder = self._create_decoder(self.encoder)\n self.bpe_ranks = dict(zip(merges, range(len(merges))))\n\n self.special_tokens = {\n \"<|startoftext|>\": \"<|startoftext|>\",\n \"<|endoftext|>\": \"<|endoftext|>\",\n }\n self.cache = {\n \"<|startoftext|>\": \"<|startoftext|>\",\n \"<|endoftext|>\": \"<|endoftext|>\",\n }\n self.pat = self._create_pat()\n\n def _create_encoder(self, vocab):\n return dict(zip(vocab, range(len(vocab))))\n\n def _create_decoder(self, encoder):\n return {v: k for k, v in encoder.items()}\n\n def _create_pat(self):\n return re.compile(\n \"|\".join([re.escape(key) for key in self.special_tokens.keys()])\n + r\"\"\"|'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+\"\"\",\n re.IGNORECASE,\n )\n\n @property\n def end_of_text(self):\n return self.encoder[\"<|endoftext|>\"]\n\n @property\n def start_of_text(self):\n return self.encoder[\"<|startoftext|>\"]\n\n def add_tokens(self, tokens):\n if isinstance(tokens, str):\n tokens = [tokens]\n tokens_added = 0\n for token in tokens:\n if token in self.vocab:\n continue\n tokens_added += 1\n self.vocab.append(token)\n self.special_tokens[token] = token\n self.cache[token] = token\n self.encoder = self._create_encoder(self.vocab)\n self.decoder = self._create_decoder(self.encoder)\n self.pat = self._create_pat()\n return tokens_added\n\n def bpe(self, token):\n if token in self.cache:\n return self.cache[token]\n word = tuple(token[:-1]) + (token[-1] + \"</w>\",)\n pairs = get_pairs(word)\n\n if not pairs:\n return token + \"</w>\"\n\n while True:\n bigram = min(\n pairs, key=lambda pair: self.bpe_ranks.get(pair, float(\"inf\"))\n )\n if bigram not in self.bpe_ranks:\n break\n first, second = bigram\n new_word = []\n i = 0\n while i < len(word):\n try:\n j = word.index(first, i)\n new_word.extend(word[i:j])\n i = j\n except:\n new_word.extend(word[i:])\n break\n\n if (word[i] == first\n and i < len(word) - 1\n and word[i + 1] == second):\n new_word.append(first + second)\n i += 2\n else:\n new_word.append(word[i])\n i += 1\n new_word = tuple(new_word)\n word = new_word\n if len(word) == 1:\n break\n else:\n pairs = get_pairs(word)\n word = \" \".join(word)\n self.cache[token] = word\n return word\n\n def encode(self, text):\n bpe_tokens = []\n text = whitespace_clean(basic_clean(text)).lower()\n for token in re.findall(self.pat, text):\n token = \"\".join(self.byte_encoder[b] for b in token.encode(\"utf-8\"))\n bpe_tokens.extend(\n self.encoder[bpe_token]\n for bpe_token in self.bpe(token).split(\" \")\n )\n return 
[self.start_of_text] + bpe_tokens + [self.end_of_text]\n\n def decode(self, tokens):\n text = \"\".join([self.decoder[token] for token in tokens])\n text = (\n bytearray([self.byte_decoder[c] for c in text])\n .decode(\"utf-8\", errors=\"replace\")\n .replace(\"</w>\", \" \")\n )\n return text" }, { "identifier": "DiffusionXLModel", "path": "stable_diffusion_xl/diffusion_model.py", "snippet": "class DiffusionXLModel(tf.keras.Model):\n @staticmethod\n def push_block(hidden_states, res_stack):\n res_stack.append(hidden_states)\n return res_stack\n\n @staticmethod\n def pop_block(hidden_states, res_stack):\n res_hidden_states = res_stack.pop()\n hidden_states = tf.concat([hidden_states, res_hidden_states], axis=-1)\n return hidden_states, res_stack\n\n def __init__(self, img_height=1024, img_width=1024, name=None, ckpt_path=None, lora_dict=None):\n sample = tf.keras.layers.Input((img_height // 8, img_width // 8, 4))\n timestep = tf.keras.layers.Input(())\n text_emb = tf.keras.layers.Input((None, 2048))\n text_embeds = tf.keras.layers.Input((1280,))\n time_ids = tf.keras.layers.Input((6,))\n # 1. time\n t_emb = Timesteps(320, name=\"time_proj\")(timestep)\n t_emb = tf.reshape(t_emb, (-1, 320))\n t_emb = Linear(1280, name=\"time_embedding.linear_1\")(tf.cast(t_emb, sample.dtype))\n t_emb = tf.keras.layers.Activation(\"swish\")(t_emb)\n t_emb = Linear(1280, name=\"time_embedding.linear_2\")(t_emb)\n time_embeds = Timesteps(256, name=\"add_time_proj\")(time_ids)\n time_embeds = tf.reshape(time_embeds, (-1, 1536)) # 6*256 = 1536\n add_embeds = tf.concat([text_embeds, time_embeds], axis=-1)\n add_embeds = tf.cast(add_embeds, sample.dtype)\n add_embeds = Linear(1280, name=\"add_embedding.linear_1\")(add_embeds)\n add_embeds = tf.keras.layers.Activation(\"swish\")(add_embeds)\n add_embeds = Linear(1280, name=\"add_embedding.linear_2\")(add_embeds)\n time_emb = tf.keras.layers.Activation(\"swish\")(t_emb + add_embeds)\n # 2. pre-process\n hidden_states = tf.keras.layers.Conv2D(320, kernel_size=3, strides=1, name=\"conv_in\")(\n tf.keras.layers.ZeroPadding2D(1)(sample))\n res_stack = [hidden_states]\n # 3. 
blocks\n # DownBlock2D\n hidden_states = ResnetBlock(320, name=\"down_blocks.0.resnets.0\")((hidden_states, time_emb))\n res_stack = self.push_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(320, name=\"down_blocks.0.resnets.1\")((hidden_states, time_emb))\n res_stack = self.push_block(hidden_states, res_stack)\n hidden_states = DownSampler(320, name=\"down_blocks.0.downsamplers.0\")(hidden_states)\n res_stack = self.push_block(hidden_states, res_stack)\n # CrossAttnDownBlock2D\n hidden_states = ResnetBlock(640, name=\"down_blocks.1.resnets.0\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(10, 64, 640, 2, name=\"down_blocks.1.attentions.0\")((hidden_states, text_emb))\n res_stack = self.push_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(640, name=\"down_blocks.1.resnets.1\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(10, 64, 640, 2, name=\"down_blocks.1.attentions.1\")((hidden_states, text_emb))\n res_stack = self.push_block(hidden_states, res_stack)\n hidden_states = DownSampler(640, name=\"down_blocks.1.downsamplers.0\")(hidden_states)\n res_stack = self.push_block(hidden_states, res_stack)\n # CrossAttnDownBlock2D\n hidden_states = ResnetBlock(1280, name=\"down_blocks.2.resnets.0\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(20, 64, 1280, 10, name=\"down_blocks.2.attentions.0\")((hidden_states, text_emb))\n res_stack = self.push_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(1280, name=\"down_blocks.2.resnets.1\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(20, 64, 1280, 10, name=\"down_blocks.2.attentions.1\")((hidden_states, text_emb))\n res_stack = self.push_block(hidden_states, res_stack)\n # UNetMidBlock2DCrossAttn\n hidden_states = ResnetBlock(1280, name=\"mid_block.resnets.0\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(20, 64, 1280, 10, name=\"mid_block.attentions.0\")((hidden_states, text_emb))\n hidden_states = ResnetBlock(1280, name=\"mid_block.resnets.1\")((hidden_states, time_emb))\n # CrossAttnUpBlock2D\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(1280, name=\"up_blocks.0.resnets.0\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(20, 64, 1280, 10, name=\"up_blocks.0.attentions.0\")((hidden_states, text_emb))\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(1280, name=\"up_blocks.0.resnets.1\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(20, 64, 1280, 10, name=\"up_blocks.0.attentions.1\")((hidden_states, text_emb))\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(1280, name=\"up_blocks.0.resnets.2\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(20, 64, 1280, 10, name=\"up_blocks.0.attentions.2\")((hidden_states, text_emb))\n hidden_states = UpSampler(1280, name=\"up_blocks.0.upsamplers.0\")(hidden_states)\n # CrossAttnUpBlock2D\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(640, name=\"up_blocks.1.resnets.0\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(10, 64, 640, 2, name=\"up_blocks.1.attentions.0\")((hidden_states, text_emb))\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(640, name=\"up_blocks.1.resnets.1\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(10, 64, 640, 2, 
name=\"up_blocks.1.attentions.1\")((hidden_states, text_emb))\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(640, name=\"up_blocks.1.resnets.2\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(10, 64, 640, 2, name=\"up_blocks.1.attentions.2\")((hidden_states, text_emb))\n hidden_states = UpSampler(640, name=\"up_blocks.1.upsamplers.0\")(hidden_states)\n # UpBlock2D\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(320, name=\"up_blocks.2.resnets.0\")((hidden_states, time_emb))\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(320, name=\"up_blocks.2.resnets.1\")((hidden_states, time_emb))\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(320, name=\"up_blocks.2.resnets.2\")((hidden_states, time_emb))\n hidden_states = GroupNormalization(32, epsilon=1e-05, center=True, scale=True,\n name=\"conv_norm_out\")(\n hidden_states)\n hidden_states = tf.keras.layers.Activation(\"swish\")(hidden_states)\n output = tf.keras.layers.Conv2D(4, kernel_size=3, strides=1, name=\"conv_out\")(\n tf.keras.layers.ZeroPadding2D(1)(hidden_states))\n super().__init__([sample, timestep, text_emb, time_ids, text_embeds], output, name=name)\n origin = \"https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/unet/diffusion_pytorch_model.fp16.safetensors\"\n ckpt_mapping = CKPT_MAPPING[\"diffusion_model\"]\n if ckpt_path is not None:\n if os.path.exists(ckpt_path):\n load_weights_from_file(self, ckpt_path, ckpt_mapping=ckpt_mapping, key_mapping=UNET_KEY_MAPPING,\n lora_dict=lora_dict)\n return\n else:\n origin = ckpt_path\n model_weights_fpath = tf.keras.utils.get_file(origin=origin)\n if os.path.exists(model_weights_fpath):\n load_weights_from_file(self, model_weights_fpath, ckpt_mapping=ckpt_mapping, key_mapping=UNET_KEY_MAPPING,\n lora_dict=lora_dict)" }, { "identifier": "ImageDecoder", "path": "stable_diffusion_xl/image_decoder.py", "snippet": "class ImageDecoder(tf.keras.Sequential):\n def __init__(self, img_height=1024, img_width=1024, name=None, ckpt_path=None):\n super().__init__(\n [\n tf.keras.layers.Input((img_height // 8, img_width // 8, 4)),\n tf.keras.layers.Rescaling(1.0 / 0.13025),\n tf.keras.layers.Conv2D(4, 1, strides=1),\n tf.keras.layers.ZeroPadding2D(padding=1),\n tf.keras.layers.Conv2D(512, 3, strides=1),\n VaeResnetBlock(512),\n VaeAttentionBlock(512),\n VaeResnetBlock(512),\n VaeResnetBlock(512),\n VaeResnetBlock(512),\n VaeResnetBlock(512),\n UpSampler(512),\n VaeResnetBlock(512),\n VaeResnetBlock(512),\n VaeResnetBlock(512),\n UpSampler(512),\n VaeResnetBlock(256),\n VaeResnetBlock(256),\n VaeResnetBlock(256),\n UpSampler(256),\n VaeResnetBlock(128),\n VaeResnetBlock(128),\n VaeResnetBlock(128),\n GroupNormalization(epsilon=1e-5),\n tf.keras.layers.Activation(\"swish\"),\n tf.keras.layers.ZeroPadding2D(padding=1),\n tf.keras.layers.Conv2D(3, 3, strides=1),\n ],\n name=name)\n origin = \"https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/vae_1_0/diffusion_pytorch_model.fp16.safetensors\"\n ckpt_mapping = CKPT_MAPPING[\"decoder\"]\n if ckpt_path is not None:\n if os.path.exists(ckpt_path):\n load_weights_from_file(self, ckpt_path, ckpt_mapping=ckpt_mapping, key_mapping=VAE_KEY_MAPPING)\n return\n else:\n origin = ckpt_path\n model_weights_fpath = tf.keras.utils.get_file(origin=origin)\n if os.path.exists(model_weights_fpath):\n 
load_weights_from_file(self, model_weights_fpath, ckpt_mapping=ckpt_mapping, key_mapping=VAE_KEY_MAPPING)" }, { "identifier": "ImageEncoder", "path": "stable_diffusion_xl/image_encoder.py", "snippet": "class ImageEncoder(tf.keras.Sequential):\n \"\"\"ImageEncoder is the VAE Encoder for StableDiffusionXL.\"\"\"\n\n def __init__(self, ckpt_path=None):\n super().__init__(\n [\n tf.keras.layers.Input((None, None, 3)),\n tf.keras.layers.ZeroPadding2D(padding=1),\n tf.keras.layers.Conv2D(128, 3, strides=1),\n VaeResnetBlock(128),\n VaeResnetBlock(128),\n DownSampler(128, padding=((0, 1), (0, 1))),\n VaeResnetBlock(256),\n VaeResnetBlock(256),\n DownSampler(256, padding=((0, 1), (0, 1))),\n VaeResnetBlock(512),\n VaeResnetBlock(512),\n DownSampler(512, padding=((0, 1), (0, 1))),\n VaeResnetBlock(512),\n VaeResnetBlock(512),\n VaeResnetBlock(512),\n VaeAttentionBlock(512),\n VaeResnetBlock(512),\n GroupNormalization(epsilon=1e-5),\n tf.keras.layers.Activation(\"swish\"),\n tf.keras.layers.ZeroPadding2D(padding=1),\n tf.keras.layers.Conv2D(8, 3, strides=1),\n tf.keras.layers.Conv2D(8, 1, strides=1),\n tf.keras.layers.Lambda(lambda x: tf.split(x, num_or_size_splits=2, axis=-1)[0] * 0.13025),\n ])\n origin = \"https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/vae_1_0/diffusion_pytorch_model.fp16.safetensors\"\n ckpt_mapping = CKPT_MAPPING[\"encoder\"]\n if ckpt_path is not None:\n if os.path.exists(ckpt_path):\n load_weights_from_file(self, ckpt_path, ckpt_mapping=ckpt_mapping, key_mapping=VAE_KEY_MAPPING)\n return\n else:\n origin = ckpt_path\n model_weights_fpath = tf.keras.utils.get_file(origin=origin)\n if os.path.exists(model_weights_fpath):\n load_weights_from_file(self, model_weights_fpath, ckpt_mapping=ckpt_mapping, key_mapping=VAE_KEY_MAPPING)" }, { "identifier": "get_weighted_text_embeddings", "path": "stable_diffusion_xl/long_prompt_weighting.py", "snippet": "def get_weighted_text_embeddings(\n tokenizer,\n text_encoder,\n prompt: Union[str, List[str]],\n max_embeddings_multiples: Optional[int] = 4,\n no_boseos_middle: Optional[bool] = False,\n skip_parsing: Optional[bool] = False,\n skip_weighting: Optional[bool] = False,\n model_max_length=77,\n pad_token_id=49407,\n text_encoder_pool=None,\n):\n r\"\"\"\n Prompts can be assigned with local weights using brackets. For example,\n prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful',\n and the embedding tokens corresponding to the words get multiplied by a constant, 1.1.\n\n Also, to regularize of the embedding, the weighted embedding would be scaled to preserve the original mean.\n\n Args:\n tokenizer : provide access to the tokenizer\n text_encoder : provide access to the text encoder.\n prompt (`str` or `List[str]`):\n The prompt or prompts to guide the image generation.\n max_embeddings_multiples (`int`, *optional*, defaults to `1`):\n The max multiple length of prompt embeddings compared to the max output length of text encoder.\n no_boseos_middle (`bool`, *optional*, defaults to `False`):\n If the length of text token is multiples of the capacity of text encoder, whether reserve the starting and\n ending token in each of the chunk in the middle.\n skip_parsing (`bool`, *optional*, defaults to `False`):\n Skip the parsing of brackets.\n skip_weighting (`bool`, *optional*, defaults to `False`):\n Skip the weighting. 
When the parsing is skipped, it is forced True.\n \"\"\"\n max_length = (model_max_length - 2) * max_embeddings_multiples + 2\n if isinstance(prompt, str):\n prompt = [prompt]\n\n if not skip_parsing:\n prompt_tokens, prompt_weights = get_prompts_with_weights(tokenizer, prompt, max_length - 2)\n else:\n prompt_tokens = [\n token[1:-1]\n for token in tokenizer.encode(prompt)[:max_length]\n ]\n prompt_weights = [[1.0] * len(token) for token in prompt_tokens]\n\n # round up the longest length of tokens to a multiple of (model_max_length - 2)\n max_length = max([len(token) for token in prompt_tokens])\n\n max_embeddings_multiples = min(\n max_embeddings_multiples,\n (max_length - 1) // (model_max_length - 2) + 1,\n )\n max_embeddings_multiples = max(1, max_embeddings_multiples)\n max_length = (model_max_length - 2) * max_embeddings_multiples + 2\n\n # pad the length of tokens and weights\n bos = tokenizer.start_of_text\n eos = tokenizer.end_of_text\n pad = pad_token_id\n prompt_tokens, prompt_weights = pad_tokens_and_weights(\n prompt_tokens,\n prompt_weights,\n max_length,\n bos,\n eos,\n pad,\n no_boseos_middle=no_boseos_middle,\n chunk_length=model_max_length,\n )\n prompt_tokens = np.array(prompt_tokens, dtype=np.int32)\n # get the embeddings\n if pad_token_id != 0:\n text_embeddings_pool = None\n text_embeddings = get_unweighted_text_embeddings_openai(\n text_encoder,\n prompt_tokens,\n model_max_length,\n no_boseos_middle=no_boseos_middle,\n )\n else:\n text_embeddings, text_embeddings_pool = get_unweighted_text_embeddings_laion(\n text_encoder,\n prompt_tokens,\n model_max_length,\n no_boseos_middle=no_boseos_middle,\n text_encoder_pool=text_encoder_pool,\n )\n prompt_weights = np.array(prompt_weights, dtype=text_embeddings.dtype)\n if (not skip_parsing) and (not skip_weighting):\n previous_mean = text_embeddings.mean(axis=(-2, -1))\n text_embeddings *= prompt_weights[:, :, None]\n text_embeddings *= (previous_mean / text_embeddings.mean(axis=(-2, -1)))[:, None, None]\n return text_embeddings, text_embeddings_pool" }, { "identifier": "Scheduler", "path": "stable_diffusion_xl/scheduler.py", "snippet": "class Scheduler(object):\n \"\"\"\n `LCMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with\n non-Markovian guidance.\n\n\n Args:\n num_train_timesteps (`int`, defaults to 1000):\n The number of diffusion steps to train the model.\n beta_start (`float`, defaults to 0.0001):\n The starting `beta` value of inference.\n beta_end (`float`, defaults to 0.02):\n The final `beta` value.\n active_lcm (`bool`, defaults true):\n apply lcm or not.\n original_inference_steps (`int`, *optional*, defaults to 50):\n The default number of inference steps used to generate a linearly-spaced timestep schedule, from which we\n will ultimately take `num_inference_steps` evenly spaced timesteps to form the final timestep schedule.\n timestep_scaling (`float`, defaults to 10.0):\n The factor the timesteps will be multiplied by when calculating the consistency model boundary conditions\n `c_skip` and `c_out`. 
Increasing this will decrease the approximation error (although the approximation\n error at the default of `10.0` is already pretty small).\n \"\"\"\n\n def __init__(self, num_train_timesteps: int = 1000, beta_start: float = 0.00085, beta_end: float = 0.012,\n original_inference_steps: int = 50, timestep_scaling: float = 10.0, active_lcm=True):\n self.active_lcm = active_lcm\n self.num_train_timesteps = num_train_timesteps\n self.original_inference_steps = original_inference_steps\n self.timestep_scaling = timestep_scaling\n # this schedule is very specific to the latent diffusion model.\n self.alphas_cumprod = np.cumprod(\n 1. - np.square(np.linspace(np.sqrt(beta_start), np.sqrt(beta_end), num_train_timesteps)), axis=0)\n self.signal_rates = np.sqrt(self.alphas_cumprod)\n self.noise_rates = np.sqrt(1. - self.alphas_cumprod)\n self.final_alpha_cumprod = 1.0\n # standard deviation of the initial noise distribution\n self.init_noise_sigma = 1.0\n # setable values\n self.num_inference_steps = None\n self.timesteps = np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int32)\n self._step_index = None\n\n # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index\n def _init_step_index(self, timestep):\n index_candidates = np.nonzero(self.timesteps == timestep)\n # The sigma index that is taken for the **very** first `step`\n # is always the second index (or the last index if there is only 1)\n # This way we can ensure we don't accidentally skip a sigma in\n # case we start in the middle of the denoising schedule (e.g. for image-to-image)\n if len(index_candidates) > 1:\n step_index = index_candidates[1]\n else:\n step_index = index_candidates[0]\n self._step_index = step_index\n\n @property\n def step_index(self):\n return self._step_index\n\n def set_timesteps(self, num_inference_steps: int, original_inference_steps: Optional[int] = None,\n strength: int = 1.0):\n \"\"\"\n Sets the discrete timesteps used for the diffusion chain (to be run before inference).\n\n Args:\n num_inference_steps (`int`):\n The number of diffusion steps used when generating samples with a pre-trained model.\n original_inference_steps (`int`, *optional*):\n The original number of inference steps, which will be used to generate a linearly-spaced timestep\n schedule (which is different from the standard `diffusers` implementation). We will then take\n `num_inference_steps` timesteps from this schedule, evenly spaced in terms of indices, and use that as\n our final timestep schedule. 
If not set, this will default to the `original_inference_steps` attribute.\n \"\"\"\n\n if num_inference_steps > self.num_train_timesteps:\n raise ValueError(\n f\"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config_train_timesteps`:\"\n f\" {self.num_train_timesteps} as the unet model trained with this scheduler can only handle\"\n f\" maximal {self.num_train_timesteps} timesteps.\")\n self.num_inference_steps = num_inference_steps\n if self.active_lcm:\n original_steps = (\n original_inference_steps if original_inference_steps is not None else self.original_inference_steps)\n\n if original_steps > self.num_train_timesteps:\n raise ValueError(\n f\"`original_steps`: {original_steps} cannot be larger than `self.config_train_timesteps`:\"\n f\" {self.num_train_timesteps} as the unet model trained with this scheduler can only handle\"\n f\" maximal {self.num_train_timesteps} timesteps.\")\n if num_inference_steps > original_steps:\n raise ValueError(\n f\"`num_inference_steps`: {num_inference_steps} cannot be larger than `original_inference_steps`:\"\n f\" {original_steps} because the final timestep schedule will be a subset of the\"\n f\" `original_inference_steps`-sized initial timestep schedule.\")\n # LCM Timesteps Setting\n # Currently, only linear spacing is supported.\n c = self.num_train_timesteps // original_steps\n # LCM Training Steps Schedule\n lcm_origin_timesteps = np.asarray(list(range(1, int(original_steps * strength) + 1))) * c - 1\n skipping_step = len(lcm_origin_timesteps) // num_inference_steps\n # LCM Inference Steps Schedule\n timesteps = lcm_origin_timesteps[::-skipping_step][:num_inference_steps]\n else:\n timesteps = np.linspace(0, 1000 - 1, num_inference_steps, dtype=np.int32)[::-1]\n self.timesteps = timesteps.copy().astype(np.int32)\n self._step_index = None\n\n def get_scalings_for_boundary_condition_discrete(self, timestep, sigma_data=0.5):\n scaled_timestep = timestep * self.timestep_scaling\n c_skip = sigma_data ** 2 / (scaled_timestep ** 2 + sigma_data ** 2)\n c_out = scaled_timestep / (scaled_timestep ** 2 + sigma_data ** 2) ** 0.5\n return c_skip, c_out\n\n def step(self, latent: np.ndarray, timestep: int, latent_prev: np.ndarray):\n \"\"\"\n Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion\n process from the learned model outputs (most often the predicted noise).\n\n Args:\n latent (`np.ndarray`):\n The direct output from learned diffusion model.\n timestep (`float`):\n The current discrete timestep in the diffusion chain.\n latent_prev (`np.ndarray`):\n A current instance of a sample created by the diffusion process.\n \"\"\"\n if self.num_inference_steps is None:\n raise ValueError(\n \"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler\")\n\n if self.step_index is None:\n self._init_step_index(timestep)\n # 1. get previous step value\n prev_step_index = self.step_index + 1\n if prev_step_index < len(self.timesteps):\n prev_timestep = self.timesteps[prev_step_index]\n else:\n prev_timestep = timestep\n next_signal_rates = self.signal_rates[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod\n next_noise_rates = self.noise_rates[prev_timestep]\n signal_rates = self.signal_rates[timestep]\n noise_rates = self.noise_rates[timestep]\n # 2. Compute the predicted original sample x_0 based on the model parameterization\n pred_x0 = (latent_prev - noise_rates * latent) / signal_rates\n # 3. 
Denoise model output using boundary conditions\n if self.active_lcm:\n # 4. Get scalings for boundary conditions\n c_skip, c_out = self.get_scalings_for_boundary_condition_discrete(timestep)\n denoised = c_out * pred_x0 + c_skip * latent_prev\n # 5. Sample and inject noise z ~ N(0, I) for MultiStep Inference\n # Noise is not used on the final timestep of the timestep schedule.\n # This also means that noise is not used for one-step sampling.\n if self.step_index != self.num_inference_steps - 1:\n noise = np.random.randn(*latent.shape).astype(np.float32)\n latent = next_signal_rates * denoised + next_noise_rates * noise\n else:\n latent = denoised\n else:\n if self.step_index != self.num_inference_steps - 1:\n latent = next_signal_rates * pred_x0 + next_noise_rates * latent\n else:\n latent = pred_x0\n # upon completion increase step index by one\n self._step_index += 1\n return latent\n\n def __len__(self):\n return self.num_train_timesteps" }, { "identifier": "TextEncoderLaion", "path": "stable_diffusion_xl/text_encoder_laion.py", "snippet": "class TextEncoderLaion(tf.keras.Model):\n def __init__(self, max_length=77, embed_dim=1280, vocab_size=49408, num_heads=20, num_layers=32, name=None,\n ckpt_path=None, lora_dict=None):\n tokens = tf.keras.layers.Input(shape=(max_length,), dtype=\"int32\", name=\"tokens\")\n positions = tf.keras.layers.Input(shape=(max_length,), dtype=\"int32\", name=\"positions\")\n clip_emb = CLIPEmbedding(vocab_size, embed_dim, max_length, name=\"embeddings\")([tokens, positions])\n x = clip_emb\n out = []\n for idx in range(num_layers):\n x = CLIPEncoderLayer(embed_dim, num_heads, activation=gelu,\n name=\"text_model.encoder.layers.{}\".format(idx))(x)\n out.append(x)\n embedded = tf.keras.layers.LayerNormalization(epsilon=1e-5, name=\"text_model.final_layer_norm\")(out[-1])\n super().__init__([tokens, positions], [out[-2], embedded], name=name)\n origin = \"https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/text_encoder_2/model.fp16.safetensors\"\n ckpt_mapping = [('text_model.embeddings.token_embedding.weight', None),\n ('text_model.embeddings.position_embedding.weight', None)]\n for idx in range(0, num_layers):\n layers_name = 'text_model.encoder.layers.{}'.format(idx)\n ckpt_mapping.append(('{}.layer_norm1.weight'.format(layers_name), None))\n ckpt_mapping.append(('{}.layer_norm1.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.self_attn.q_proj.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.self_attn.q_proj.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.self_attn.k_proj.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.self_attn.k_proj.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.self_attn.v_proj.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.self_attn.v_proj.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.self_attn.out_proj.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.self_attn.out_proj.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.layer_norm2.weight'.format(layers_name), None))\n ckpt_mapping.append(('{}.layer_norm2.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.mlp.fc1.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.mlp.fc1.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.mlp.fc2.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.mlp.fc2.bias'.format(layers_name), None))\n ckpt_mapping.append(('text_model.final_layer_norm.weight', 
None))\n ckpt_mapping.append(('text_model.final_layer_norm.bias', None))\n # ckpt_mapping.append(('text_projection.weight', (1, 0)))\n if ckpt_path is not None:\n if os.path.exists(ckpt_path):\n load_weights_from_file(self, ckpt_path, ckpt_mapping=ckpt_mapping, lora_dict=lora_dict)\n return\n else:\n origin = ckpt_path\n model_weights_fpath = tf.keras.utils.get_file(origin=origin)\n if os.path.exists(model_weights_fpath):\n load_weights_from_file(self, model_weights_fpath, ckpt_mapping=ckpt_mapping, lora_dict=lora_dict)" }, { "identifier": "TextEncoderLaionProj", "path": "stable_diffusion_xl/text_encoder_laion.py", "snippet": "class TextEncoderLaionProj(tf.keras.Model):\n def __init__(self, embed_dim=1280, name=None, ckpt_path=None, lora_dict=None):\n embedded = tf.keras.layers.Input(shape=(embed_dim,), dtype=\"float32\", name=\"embedded\")\n proje_out = tf.keras.layers.Dense(1280, name=\"text_projection\", use_bias=False)(embedded)\n super().__init__(embedded, proje_out, name=name)\n origin = \"https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/text_encoder_2/model.fp16.safetensors\"\n ckpt_mapping = [('text_projection.weight', (1, 0))]\n if ckpt_path is not None:\n if os.path.exists(ckpt_path):\n load_weights_from_file(self, ckpt_path, ckpt_mapping=ckpt_mapping, lora_dict=lora_dict)\n return\n else:\n origin = ckpt_path\n model_weights_fpath = tf.keras.utils.get_file(origin=origin)\n if os.path.exists(model_weights_fpath):\n load_weights_from_file(self, model_weights_fpath, ckpt_mapping=ckpt_mapping, lora_dict=lora_dict)" }, { "identifier": "TextEncoderOpenAi", "path": "stable_diffusion_xl/text_encoder_openai.py", "snippet": "class TextEncoderOpenAi(tf.keras.Model):\n def __init__(self, max_length=77, embed_dim=768, vocab_size=49408, num_heads=12, num_layers=12, clip_skip=-2,\n final_layer_norm=False,\n name=None,\n ckpt_path=None, lora_dict=None):\n tokens = tf.keras.layers.Input(shape=(max_length,), dtype=\"int32\", name=\"tokens\")\n positions = tf.keras.layers.Input(shape=(max_length,), dtype=\"int32\", name=\"positions\")\n clip_emb = CLIPEmbedding(vocab_size, embed_dim, max_length, name=\"embeddings\")([tokens, positions])\n x = clip_emb\n out = []\n for idx in range(num_layers):\n x = CLIPEncoderLayer(embed_dim, num_heads, activation=quick_gelu,\n name=\"text_model.encoder.layers.{}\".format(idx))(x)\n out.append(x)\n embedded = out[clip_skip]\n if final_layer_norm:\n embedded = tf.keras.layers.LayerNormalization(epsilon=1e-5, name=\"text_model.final_layer_norm\")(embedded)\n super().__init__([tokens, positions], embedded, name=name)\n origin = \"https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/text_encoder/model.fp16.safetensors\"\n ckpt_mapping = [('text_model.embeddings.token_embedding.weight', None),\n ('text_model.embeddings.position_embedding.weight', None)]\n for idx in range(0, num_layers + clip_skip + 1):\n layers_name = 'text_model.encoder.layers.{}'.format(idx)\n ckpt_mapping.append(('{}.layer_norm1.weight'.format(layers_name), None))\n ckpt_mapping.append(('{}.layer_norm1.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.self_attn.q_proj.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.self_attn.q_proj.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.self_attn.k_proj.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.self_attn.k_proj.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.self_attn.v_proj.weight'.format(layers_name), (1, 0)))\n 
ckpt_mapping.append(('{}.self_attn.v_proj.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.self_attn.out_proj.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.self_attn.out_proj.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.layer_norm2.weight'.format(layers_name), None))\n ckpt_mapping.append(('{}.layer_norm2.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.mlp.fc1.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.mlp.fc1.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.mlp.fc2.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.mlp.fc2.bias'.format(layers_name), None))\n if final_layer_norm:\n ckpt_mapping.append(('text_model.final_layer_norm.weight', None))\n ckpt_mapping.append(('text_model.final_layer_norm.bias', None))\n if ckpt_path is not None:\n if os.path.exists(ckpt_path):\n load_weights_from_file(self, ckpt_path, ckpt_mapping=ckpt_mapping, lora_dict=lora_dict)\n return\n else:\n origin = ckpt_path\n model_weights_fpath = tf.keras.utils.get_file(origin=origin)\n if os.path.exists(model_weights_fpath):\n load_weights_from_file(self, model_weights_fpath, ckpt_mapping=ckpt_mapping, lora_dict=lora_dict)" } ]
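The `Scheduler` snippet in the context above spells out the LCM boundary conditions `c_skip` and `c_out` (scaled by `timestep_scaling`) and builds its noise schedule from square-root-spaced betas. The following standalone NumPy sketch reproduces just those formulas for illustration; the function name `boundary_scalings` and the example timestep `999` are choices made here, while the constants mirror the snippet's defaults.

```python
import numpy as np

# Minimal sketch of get_scalings_for_boundary_condition_discrete from the
# Scheduler snippet above, with its defaults timestep_scaling=10.0, sigma_data=0.5.
def boundary_scalings(timestep, timestep_scaling=10.0, sigma_data=0.5):
    scaled_t = timestep * timestep_scaling
    c_skip = sigma_data ** 2 / (scaled_t ** 2 + sigma_data ** 2)
    c_out = scaled_t / (scaled_t ** 2 + sigma_data ** 2) ** 0.5
    return c_skip, c_out

# The same sqrt-beta schedule as in Scheduler.__init__: cumulative product of
# (1 - beta_t) with beta_t spaced linearly in sqrt space.
beta_start, beta_end, T = 0.00085, 0.012, 1000
alphas_cumprod = np.cumprod(
    1.0 - np.square(np.linspace(np.sqrt(beta_start), np.sqrt(beta_end), T)))
signal_rates = np.sqrt(alphas_cumprod)       # multiplies the clean sample
noise_rates = np.sqrt(1.0 - alphas_cumprod)  # multiplies the injected noise

c_skip, c_out = boundary_scalings(999)
print(round(c_skip, 6), round(c_out, 6))  # c_skip ~ 0, c_out ~ 1 at large t
```

At large timesteps `c_skip` is close to 0 and `c_out` close to 1, so the consistency output is dominated by the predicted `x_0`.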
import numpy as np
import tensorflow as tf
from PIL import Image
from scipy.ndimage import correlate1d

from .clip_tokenizer import SimpleTokenizer
from .diffusion_model import DiffusionXLModel
from .image_decoder import ImageDecoder
from .image_encoder import ImageEncoder
from .long_prompt_weighting import get_weighted_text_embeddings
from .scheduler import Scheduler
from .text_encoder_laion import TextEncoderLaion, TextEncoderLaionProj
from .text_encoder_openai import TextEncoderOpenAi
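Among these imports, `get_weighted_text_embeddings` (shown earlier in the context) applies per-token prompt weights and then rescales the embeddings so their overall mean is unchanged. A minimal NumPy sketch of that weighting step, with toy shapes (1 prompt, 5 tokens, 8-dim embeddings) invented for the example:

```python
import numpy as np

np.random.seed(0)
# Toy stand-ins: batch of 1 prompt, 5 tokens, 8-dim embeddings, per-token weights.
text_embeddings = np.random.randn(1, 5, 8).astype(np.float32)
prompt_weights = np.array([[1.0, 1.2, 1.2, 0.8, 1.0]], dtype=np.float32)

# Same idea as the final block of get_weighted_text_embeddings: scale each
# token embedding by its weight, then renormalize so the global mean of the
# tensor is preserved.
previous_mean = text_embeddings.mean(axis=(-2, -1))
weighted = text_embeddings * prompt_weights[:, :, None]
weighted *= (previous_mean / weighted.mean(axis=(-2, -1)))[:, None, None]

assert np.allclose(weighted.mean(axis=(-2, -1)), previous_mean, atol=1e-5)
```

Rescaling by the previous mean keeps the overall magnitude of the context roughly unchanged even when individual tokens are up- or down-weighted.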
13264
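For reference, the timestep schedule that `Scheduler.set_timesteps` builds in its LCM branch can be reproduced in a few lines. This sketch only mirrors the indexing logic shown in the snippet (training schedule with stride `c`, then a backwards strided selection); `num_inference_steps = 4` is an arbitrary example value, the other numbers are the snippet's defaults.

```python
import numpy as np

num_train_timesteps = 1000
original_steps = 50          # original_inference_steps default in the snippet
num_inference_steps = 4      # example value chosen here
strength = 1.0

# LCM training-step schedule: every c-th timestep, shifted so it ends on c - 1.
c = num_train_timesteps // original_steps
lcm_origin_timesteps = np.arange(1, int(original_steps * strength) + 1) * c - 1
# Inference schedule: walk backwards through the training schedule with a
# stride chosen so that exactly num_inference_steps timesteps are kept.
skipping_step = len(lcm_origin_timesteps) // num_inference_steps
timesteps = lcm_origin_timesteps[::-skipping_step][:num_inference_steps]
print(timesteps)  # [999 759 519 279] for 4 steps
```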
negative_prompt = "" unconditional_context, unconditional_add_text_embeds = self.encode_text(negative_prompt) unconditional_context = self._expand_tensor(unconditional_context, batch_size) if diffusion_noise is not None: diffusion_noise = np.squeeze(diffusion_noise) if len(diffusion_noise.shape) == 3: diffusion_noise = np.repeat(np.expand_dims(diffusion_noise, axis=0), batch_size, axis=0) # Iterative reverse diffusion stage self.scheduler.set_timesteps(num_steps) timesteps = self.scheduler.timesteps[::-1] init_time = None init_latent = None input_image_array = None input_mask_array = None latent_mask_tensor = None if inpaint_mask is not None: input_mask_array, latent_mask_tensor = self.preprocessed_mask(inpaint_mask, mask_blur_strength) if input_mask_array is None or latent_mask_tensor is None: print("wrong inpaint mask:{}".format(inpaint_mask)) if reference_image is not None and (0. < reference_image_strength < 1.): input_image_array, input_image_tensor = self.preprocessed_image(reference_image) if input_image_tensor is not None: num_steps = int(num_steps * reference_image_strength + 0.5) init_time = timesteps[num_steps] init_latent = self.image_encoder.predict_on_batch(input_image_tensor) timesteps = timesteps[:num_steps] else: print("wrong reference image:{}".format(reference_image)) latent = self._get_initial_diffusion_latent(batch_size=batch_size, init_latent=init_latent, init_time=init_time, seed=seed, noise=diffusion_noise) progbar = tf.keras.utils.Progbar(len(timesteps)) iteration = 0 if original_size is None: original_size = [self.img_height, self.img_width] if target_size is None: target_size = [self.img_height, self.img_width] add_time_ids = tf.expand_dims( tf.convert_to_tensor(list(list(original_size) + list(crops_coords_top_left) + list(target_size)), latent.dtype), axis=0) for index, timestep in list(enumerate(timesteps))[::-1]: latent_prev = latent # Set aside the previous latent vector time_emb = np.repeat(np.reshape(timestep, [1, -1]), batch_size, axis=0) if unconditional_guidance_scale > 0.0: unconditional_latent = self.diffusion_model.predict_on_batch( [latent, time_emb, unconditional_context, add_time_ids, tf.zeros_like(add_text_embeds)]) latent_text = self.diffusion_model.predict_on_batch( [latent, time_emb, context, add_time_ids, add_text_embeds]) latent = unconditional_latent + unconditional_guidance_scale * ( latent_text - unconditional_latent) if guidance_rescale > 0.0: # Based on 3.4. in https://arxiv.org/abs/2305.08891 latent = self.rescale_noise_cfg(latent, latent_text, guidance_rescale=guidance_rescale) else: latent = self.diffusion_model.predict_on_batch( [latent, time_emb, context, add_time_ids, add_text_embeds]) latent = self.scheduler.step(latent, timestep, latent_prev) if latent_mask_tensor is not None and init_latent is not None: latent_orgin = self._get_initial_diffusion_latent(batch_size=batch_size, init_latent=init_latent, init_time=timestep, seed=seed, noise=diffusion_noise) latent = latent_orgin * (1. - latent_mask_tensor) + latent * latent_mask_tensor iteration += 1 if callback is not None: callback(iteration) progbar.update(iteration) # Decoding stage decoded = self.image_decoder.predict_on_batch(latent) decoded = np.array(((decoded + 1.) * 0.5), dtype=np.float32) if input_mask_array is not None and input_image_array is not None: decoded = input_image_array * (1. 
- input_mask_array) + decoded * input_mask_array return np.clip(decoded * 255., 0, 255).astype("uint8") def _expand_tensor(self, text_embedding, batch_size): """Extends a tensor by repeating it to fit the shape of the given batch size.""" text_embedding = np.squeeze(text_embedding) if len(text_embedding.shape) == 2: text_embedding = np.repeat( np.expand_dims(text_embedding, axis=0), batch_size, axis=0 ) return text_embedding @property def image_encoder(self): pass @property def text_encoder_openai(self): pass @property def text_encoder_laion(self): pass @property def text_encoder_laion_proj(self): pass @property def diffusion_model(self): pass @property def image_decoder(self): pass @property def tokenizer(self): """tokenizer returns the tokenizer used for text inputs. Can be overriden for tasks like textual inversion where the tokenizer needs to be modified. """ if self._tokenizer is None:
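The denoising loop in `cropped_code` above combines the unconditional and text-conditioned UNet outputs with classifier-free guidance and then optionally calls `rescale_noise_cfg`, which is defined further down in `all_code` and follows Section 3.4 of arXiv:2305.08891. A self-contained NumPy sketch of those two operations, with random arrays standing in for the two UNet predictions (the shapes are illustrative only):

```python
import numpy as np

def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.7, epsilon=1e-5):
    # Match the per-sample std of the guided prediction to the text prediction,
    # then blend with the unrescaled result (Section 3.4 of arXiv:2305.08891).
    axes = tuple(range(1, noise_pred_text.ndim))
    std_text = np.std(noise_pred_text, axis=axes, keepdims=True)
    std_cfg = np.std(noise_cfg, axis=axes, keepdims=True) + epsilon
    rescaled = noise_cfg * (std_text / std_cfg)
    return guidance_rescale * rescaled + (1.0 - guidance_rescale) * noise_cfg

# Toy stand-ins for the two UNet passes (shape: batch, h/8, w/8, 4).
rng = np.random.default_rng(0)
unconditional_latent = rng.normal(size=(1, 8, 8, 4)).astype(np.float32)
latent_text = rng.normal(size=(1, 8, 8, 4)).astype(np.float32)

guidance_scale = 7.5
latent = unconditional_latent + guidance_scale * (latent_text - unconditional_latent)
latent = rescale_noise_cfg(latent, latent_text, guidance_rescale=0.7)
```

The std-ratio rescale pulls the guided prediction's per-sample standard deviation back toward that of the text-conditioned prediction, which the source comments describe as a fix for overexposed results at high guidance scales.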
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Keras implementation of StableDiffusionXL.""" MAX_PROMPT_LENGTH = 77 class StableDiffusionXLBase: """Base class for stable diffusion xl model.""" def __init__(self, img_height=1024, img_width=1024, jit_compile=False, active_lcm=False): self.img_height = img_height self.img_width = img_width # lazy initialize the component models and the tokenizer self._image_encoder = None self._text_encoder_laion = None self._text_encoder_laion_proj = None self._text_encoder_openai = None self._diffusion_model = None self._image_decoder = None self._tokenizer = None self.jit_compile = jit_compile self.active_lcm = active_lcm self.scheduler = Scheduler(active_lcm=active_lcm) def text_to_image( self, prompt, negative_prompt=None, batch_size=1, num_steps=50, unconditional_guidance_scale=7.5, seed=None, original_size=None, crops_coords_top_left=(0, 0), target_size=None, guidance_rescale=0.7, callback=None): encoded_text, add_text_embeds = self.encode_text(prompt) return self.generate_image( encoded_text, add_text_embeds, negative_prompt=negative_prompt, batch_size=batch_size, num_steps=num_steps, unconditional_guidance_scale=unconditional_guidance_scale, seed=seed, original_size=original_size, crops_coords_top_left=crops_coords_top_left, target_size=target_size, guidance_rescale=guidance_rescale, callback=callback) def image_to_image( self, prompt, negative_prompt=None, batch_size=1, num_steps=50, unconditional_guidance_scale=7.5, seed=None, reference_image=None, reference_image_strength=0.8, original_size=None, crops_coords_top_left=(0, 0), target_size=None, guidance_rescale=0.7, callback=None): encoded_text, add_text_embeds = self.encode_text(prompt) return self.generate_image( encoded_text, add_text_embeds, negative_prompt=negative_prompt, batch_size=batch_size, num_steps=num_steps, unconditional_guidance_scale=unconditional_guidance_scale, seed=seed, reference_image=reference_image, reference_image_strength=reference_image_strength, original_size=original_size, crops_coords_top_left=crops_coords_top_left, target_size=target_size, guidance_rescale=guidance_rescale, callback=callback) def inpaint( self, prompt, negative_prompt=None, batch_size=1, num_steps=50, unconditional_guidance_scale=7.5, seed=None, reference_image=None, reference_image_strength=0.8, inpaint_mask=None, mask_blur_strength=None, original_size=None, crops_coords_top_left=(0, 0), target_size=None, guidance_rescale=0.7, callback=None): encoded_text, add_text_embeds = self.encode_text(prompt) return self.generate_image( encoded_text, add_text_embeds, negative_prompt=negative_prompt, batch_size=batch_size, num_steps=num_steps, unconditional_guidance_scale=unconditional_guidance_scale, seed=seed, reference_image=reference_image, reference_image_strength=reference_image_strength, inpaint_mask=inpaint_mask, mask_blur_strength=mask_blur_strength, original_size=original_size, crops_coords_top_left=crops_coords_top_left, target_size=target_size, 
guidance_rescale=guidance_rescale, callback=callback) def encode_text(self, prompt): """Encodes a prompt into a latent text encoding. The encoding produced by this method should be used as the `encoded_text` parameter of `StableDiffusion.generate_image`. Encoding text separately from generating an image can be used to arbitrarily modify the text encoding prior to image generation, e.g. for walking between two prompts. Args: prompt: a string to encode, must be 77 tokens or shorter. Example: ```python from keras_cv.models import StableDiffusion model = StableDiffusionXL(img_height=1024, img_width=1024, jit_compile=True) encoded_text = model.encode_text("Tacos at dawn") img = model.generate_image(encoded_text) ``` """ # Tokenize prompt (i.e. starting context) context_openai, _ = get_weighted_text_embeddings(self.tokenizer, self.text_encoder_openai, prompt, model_max_length=MAX_PROMPT_LENGTH, pad_token_id=49407) context_laion, add_text_embeds = get_weighted_text_embeddings(self.tokenizer, self.text_encoder_laion, prompt, model_max_length=MAX_PROMPT_LENGTH, pad_token_id=0, text_encoder_pool=self.text_encoder_laion_proj) return np.concatenate([context_openai, context_laion], axis=-1), add_text_embeds def gaussian_blur(self, image, radius=3, h_axis=1, v_axis=2): def build_filter1d(kernel_size): if kernel_size == 1: filter1d = [1] else: triangle = [[1, 1]] for i in range(1, kernel_size - 1): cur_row = [1] prev_row = triangle[i - 1] for j in range(len(prev_row) - 1): cur_row.append(prev_row[j] + prev_row[j + 1]) cur_row.append(1) triangle.append(cur_row) filter1d = triangle[-1] filter1d = np.reshape(filter1d, (kernel_size,)) return filter1d / np.sum(filter1d) weights = build_filter1d(radius) # Apply filter horizontally blurred_image = correlate1d(image, weights, axis=h_axis, output=None, mode="reflect", cval=0.0, origin=0) # Apply filter vertically blurred_image = correlate1d(blurred_image, weights, axis=v_axis, output=None, mode="reflect", cval=0.0, origin=0) return blurred_image @staticmethod def resize(image_array, new_h=None, new_w=None): h, w, c = image_array.shape if new_h == h and new_w == w: return image_array h_bounds = 0, h - 1 w_bounds = 0, w - 1 y = np.expand_dims(np.linspace(h_bounds[0], h_bounds[1], new_h), axis=-1) x = np.expand_dims(np.linspace(w_bounds[0], w_bounds[1], new_w), axis=0) # Calculate the floor and ceiling values of x and y x_floor = np.floor(x).astype(int) x_ceil = np.ceil(x).astype(int) y_floor = np.floor(y).astype(int) y_ceil = np.ceil(y).astype(int) # Clip the values to stay within the image bounds x_floor = np.clip(x_floor, w_bounds[0], w_bounds[1]) x_ceil = np.clip(x_ceil, w_bounds[0], w_bounds[1]) y_floor = np.clip(y_floor, h_bounds[0], h_bounds[1]) y_ceil = np.clip(y_ceil, h_bounds[0], h_bounds[1]) # Calculate the fractional part of x and y dx = x - x_floor dy = y - y_floor # Get the values of the four neighboring pixels dx = np.expand_dims(dx, axis=-1) dy = np.expand_dims(dy, axis=-1) q11 = image_array[y_floor, x_floor, :] q21 = image_array[y_floor, x_ceil, :] q12 = image_array[y_ceil, x_floor, :] q22 = image_array[y_ceil, x_ceil, :] # Perform bilinear interpolation top_interp = q11 * (1.0 - dx) + q21 * dx bottom_interp = q12 * (1.0 - dx) + q22 * dx interpolated = top_interp * (1.0 - dy) + bottom_interp * dy return interpolated def preprocessed_image(self, x): if type(x) is str: x = np.array(Image.open(x).convert("RGB")) else: x = np.asarray(x) image_array = self.resize(x, self.img_height, self.img_width) image_array = np.array(image_array, dtype=np.float32) / 
255.0 input_image_array = image_array[None, ..., :3] input_image_tensor = input_image_array * 2.0 - 1.0 return input_image_array, input_image_tensor def preprocessed_mask(self, x, blur_radius=5): if type(x) is str: x = np.array(Image.open(x).convert("L")) else: x = np.asarray(x) if len(x.shape) == 2: x = np.expand_dims(x, axis=-1) mask_array = self.resize(x, self.img_height, self.img_width) if mask_array.shape[-1] != 1: mask_array = np.mean(mask_array, axis=-1, keepdims=True) input_mask_array = np.array(mask_array, dtype=np.float32) / 255.0 if blur_radius is not None: input_mask_array = self.gaussian_blur(input_mask_array, radius=blur_radius, h_axis=0, v_axis=1) latent_mask_tensor = self.resize(input_mask_array, self.img_width // 8, self.img_height // 8) return np.expand_dims(input_mask_array, axis=0), np.expand_dims(latent_mask_tensor, axis=0) def rescale_noise_cfg(self, noise_cfg, noise_pred_text, guidance_rescale=0.0, epsilon=1e-05): """ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/abs/2305.08891). See Section 3.4 """ std_text = np.std(noise_pred_text, axis=tuple(range(1, len(noise_pred_text.shape))), keepdims=True) std_cfg = np.std(noise_cfg, axis=tuple(range(1, len(noise_cfg.shape))), keepdims=True) + epsilon # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1.0 - guidance_rescale) * noise_cfg return noise_cfg def generate_image( self, encoded_text, add_text_embeds, negative_prompt=None, batch_size=1, num_steps=50, unconditional_guidance_scale=7.5, diffusion_noise=None, seed=None, inpaint_mask=None, mask_blur_strength=None, reference_image=None, reference_image_strength=0.8, callback=None, original_size=None, crops_coords_top_left=(0, 0), guidance_rescale=0.0, target_size=None): """Generates an image based on encoded text. The encoding passed to this method should be derived from `StableDiffusion.encode_text`. Args: encoded_text: Tensor of shape (`batch_size`, 77, 768), or a Tensor of shape (77, 768). When the batch axis is omitted, the same encoded text will be used to produce every generated image. batch_size: int, number of images to generate, defaults to 1. negative_prompt: a string containing information to negatively guide the image generation (e.g. by removing or altering certain aspects of the generated image), defaults to None. num_steps: int, number of diffusion steps (controls image quality), defaults to 50. unconditional_guidance_scale: float, controlling how closely the image should adhere to the prompt. Larger values result in more closely adhering to the prompt, but will make the image noisier. Defaults to 7.5. diffusion_noise: Tensor of shape (`batch_size`, img_height // 8, img_width // 8, 4), or a Tensor of shape (img_height // 8, img_width // 8, 4). Optional custom noise to seed the diffusion process. When the batch axis is omitted, the same noise will be used to seed diffusion for every generated image. seed: integer which is used to seed the random generation of diffusion noise, only to be specified if `diffusion_noise` is None. 
Example: ```python from stable_diffusion_xl.stable_diffusion_xl import StableDiffusionXL batch_size = 8 model = StableDiffusionXL(img_height=1024, img_width=1024, jit_compile=True) e_tacos = model.encode_text("Tacos at dawn") e_watermelons = model.encode_text("Watermelons at dusk") e_interpolated = tf.linspace(e_tacos, e_watermelons, batch_size) images = model.generate_image(e_interpolated, batch_size=batch_size) ``` """ if diffusion_noise is not None and seed is not None: raise ValueError( "`diffusion_noise` and `seed` should not both be passed to " "`generate_image`. `seed` is only used to generate diffusion " "noise when it's not already user-specified." ) context = self._expand_tensor(encoded_text, batch_size) if negative_prompt is None: negative_prompt = "" unconditional_context, unconditional_add_text_embeds = self.encode_text(negative_prompt) unconditional_context = self._expand_tensor(unconditional_context, batch_size) if diffusion_noise is not None: diffusion_noise = np.squeeze(diffusion_noise) if len(diffusion_noise.shape) == 3: diffusion_noise = np.repeat(np.expand_dims(diffusion_noise, axis=0), batch_size, axis=0) # Iterative reverse diffusion stage self.scheduler.set_timesteps(num_steps) timesteps = self.scheduler.timesteps[::-1] init_time = None init_latent = None input_image_array = None input_mask_array = None latent_mask_tensor = None if inpaint_mask is not None: input_mask_array, latent_mask_tensor = self.preprocessed_mask(inpaint_mask, mask_blur_strength) if input_mask_array is None or latent_mask_tensor is None: print("wrong inpaint mask:{}".format(inpaint_mask)) if reference_image is not None and (0. < reference_image_strength < 1.): input_image_array, input_image_tensor = self.preprocessed_image(reference_image) if input_image_tensor is not None: num_steps = int(num_steps * reference_image_strength + 0.5) init_time = timesteps[num_steps] init_latent = self.image_encoder.predict_on_batch(input_image_tensor) timesteps = timesteps[:num_steps] else: print("wrong reference image:{}".format(reference_image)) latent = self._get_initial_diffusion_latent(batch_size=batch_size, init_latent=init_latent, init_time=init_time, seed=seed, noise=diffusion_noise) progbar = tf.keras.utils.Progbar(len(timesteps)) iteration = 0 if original_size is None: original_size = [self.img_height, self.img_width] if target_size is None: target_size = [self.img_height, self.img_width] add_time_ids = tf.expand_dims( tf.convert_to_tensor(list(list(original_size) + list(crops_coords_top_left) + list(target_size)), latent.dtype), axis=0) for index, timestep in list(enumerate(timesteps))[::-1]: latent_prev = latent # Set aside the previous latent vector time_emb = np.repeat(np.reshape(timestep, [1, -1]), batch_size, axis=0) if unconditional_guidance_scale > 0.0: unconditional_latent = self.diffusion_model.predict_on_batch( [latent, time_emb, unconditional_context, add_time_ids, tf.zeros_like(add_text_embeds)]) latent_text = self.diffusion_model.predict_on_batch( [latent, time_emb, context, add_time_ids, add_text_embeds]) latent = unconditional_latent + unconditional_guidance_scale * ( latent_text - unconditional_latent) if guidance_rescale > 0.0: # Based on 3.4. 
in https://arxiv.org/abs/2305.08891 latent = self.rescale_noise_cfg(latent, latent_text, guidance_rescale=guidance_rescale) else: latent = self.diffusion_model.predict_on_batch( [latent, time_emb, context, add_time_ids, add_text_embeds]) latent = self.scheduler.step(latent, timestep, latent_prev) if latent_mask_tensor is not None and init_latent is not None: latent_orgin = self._get_initial_diffusion_latent(batch_size=batch_size, init_latent=init_latent, init_time=timestep, seed=seed, noise=diffusion_noise) latent = latent_orgin * (1. - latent_mask_tensor) + latent * latent_mask_tensor iteration += 1 if callback is not None: callback(iteration) progbar.update(iteration) # Decoding stage decoded = self.image_decoder.predict_on_batch(latent) decoded = np.array(((decoded + 1.) * 0.5), dtype=np.float32) if input_mask_array is not None and input_image_array is not None: decoded = input_image_array * (1. - input_mask_array) + decoded * input_mask_array return np.clip(decoded * 255., 0, 255).astype("uint8") def _expand_tensor(self, text_embedding, batch_size): """Extends a tensor by repeating it to fit the shape of the given batch size.""" text_embedding = np.squeeze(text_embedding) if len(text_embedding.shape) == 2: text_embedding = np.repeat( np.expand_dims(text_embedding, axis=0), batch_size, axis=0 ) return text_embedding @property def image_encoder(self): pass @property def text_encoder_openai(self): pass @property def text_encoder_laion(self): pass @property def text_encoder_laion_proj(self): pass @property def diffusion_model(self): pass @property def image_decoder(self): pass @property def tokenizer(self): """tokenizer returns the tokenizer used for text inputs. Can be overriden for tasks like textual inversion where the tokenizer needs to be modified. """ if self._tokenizer is None:
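One helper in `all_code` worth isolating is `gaussian_blur`, which approximates a Gaussian with a normalized binomial (Pascal's-triangle) kernel and applies it as two separable `scipy.ndimage.correlate1d` passes. The sketch below builds the same kernel with repeated `np.convolve` instead of the explicit triangle loop, purely as a compact illustration; the image shape is a toy value.

```python
import numpy as np
from scipy.ndimage import correlate1d

def binomial_filter1d(kernel_size):
    # Same result as build_filter1d in the source: the k-th row of Pascal's
    # triangle, normalized to sum to 1, approximates a Gaussian kernel.
    row = np.array([1.0])
    for _ in range(kernel_size - 1):
        row = np.convolve(row, [1.0, 1.0])
    return row / row.sum()

weights = binomial_filter1d(5)                       # [1, 4, 6, 4, 1] / 16
image = np.random.rand(64, 64, 1).astype(np.float32)  # toy single-channel image
# Two separable passes, exactly as gaussian_blur applies correlate1d twice.
blurred = correlate1d(image, weights, axis=0, mode="reflect")
blurred = correlate1d(blurred, weights, axis=1, mode="reflect")
```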
self._tokenizer = SimpleTokenizer()
0
2023-10-14 18:40:16+00:00
16k
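Closing out this record: the `resize` static method in `all_code` implements bilinear interpolation by hand, gathering the four integer neighbours of each sample point and blending them with the fractional offsets. A trimmed NumPy sketch of that interpolation (it clips `floor + 1` instead of taking `ceil`, which yields the same blend; shapes are toy values):

```python
import numpy as np

def bilinear_resize(image, new_h, new_w):
    # Same scheme as resize() above: sample a regular grid, gather the four
    # integer neighbours and blend them with the fractional offsets dy, dx.
    h, w, _ = image.shape
    y = np.linspace(0, h - 1, new_h)[:, None]
    x = np.linspace(0, w - 1, new_w)[None, :]
    y0, x0 = np.floor(y).astype(int), np.floor(x).astype(int)
    y1, x1 = np.clip(y0 + 1, 0, h - 1), np.clip(x0 + 1, 0, w - 1)
    dy, dx = (y - y0)[..., None], (x - x0)[..., None]
    top = image[y0, x0, :] * (1 - dx) + image[y0, x1, :] * dx
    bottom = image[y1, x0, :] * (1 - dx) + image[y1, x1, :] * dx
    return top * (1 - dy) + bottom * dy

out = bilinear_resize(np.random.rand(32, 48, 3), 64, 96)
print(out.shape)  # (64, 96, 3)
```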
spla-tam/SplaTAM
scripts/iphone_demo.py
[ { "identifier": "relative_transformation", "path": "datasets/gradslam_datasets/geometryutils.py", "snippet": "def relative_transformation(\n trans_01: torch.Tensor, trans_02: torch.Tensor, orthogonal_rotations: bool = False\n) -> torch.Tensor:\n r\"\"\"Function that computes the relative homogenous transformation from a\n reference transformation :math:`T_1^{0} = \\begin{bmatrix} R_1 & t_1 \\\\\n \\mathbf{0} & 1 \\end{bmatrix}` to destination :math:`T_2^{0} =\n \\begin{bmatrix} R_2 & t_2 \\\\ \\mathbf{0} & 1 \\end{bmatrix}`.\n\n .. note:: Works with imperfect (non-orthogonal) rotation matrices as well.\n\n The relative transformation is computed as follows:\n\n .. math::\n\n T_1^{2} = (T_0^{1})^{-1} \\cdot T_0^{2}\n\n Arguments:\n trans_01 (torch.Tensor): reference transformation tensor of shape\n :math:`(N, 4, 4)` or :math:`(4, 4)`.\n trans_02 (torch.Tensor): destination transformation tensor of shape\n :math:`(N, 4, 4)` or :math:`(4, 4)`.\n orthogonal_rotations (bool): If True, will invert `trans_01` assuming `trans_01[:, :3, :3]` are\n orthogonal rotation matrices (more efficient). Default: False\n\n Shape:\n - Output: :math:`(N, 4, 4)` or :math:`(4, 4)`.\n\n Returns:\n torch.Tensor: the relative transformation between the transformations.\n\n Example::\n >>> trans_01 = torch.eye(4) # 4x4\n >>> trans_02 = torch.eye(4) # 4x4\n >>> trans_12 = gradslam.geometry.geometryutils.relative_transformation(trans_01, trans_02) # 4x4\n \"\"\"\n if not torch.is_tensor(trans_01):\n raise TypeError(\n \"Input trans_01 type is not a torch.Tensor. Got {}\".format(type(trans_01))\n )\n if not torch.is_tensor(trans_02):\n raise TypeError(\n \"Input trans_02 type is not a torch.Tensor. Got {}\".format(type(trans_02))\n )\n if not trans_01.dim() in (2, 3) and trans_01.shape[-2:] == (4, 4):\n raise ValueError(\n \"Input must be a of the shape Nx4x4 or 4x4.\"\n \" Got {}\".format(trans_01.shape)\n )\n if not trans_02.dim() in (2, 3) and trans_02.shape[-2:] == (4, 4):\n raise ValueError(\n \"Input must be a of the shape Nx4x4 or 4x4.\"\n \" Got {}\".format(trans_02.shape)\n )\n if not trans_01.dim() == trans_02.dim():\n raise ValueError(\n \"Input number of dims must match. Got {} and {}\".format(\n trans_01.dim(), trans_02.dim()\n )\n )\n trans_10: torch.Tensor = (\n inverse_transformation(trans_01)\n if orthogonal_rotations\n else torch.inverse(trans_01)\n )\n trans_12: torch.Tensor = compose_transformations(trans_10, trans_02)\n return trans_12" }, { "identifier": "seed_everything", "path": "utils/common_utils.py", "snippet": "def seed_everything(seed=42):\n \"\"\"\n Set the `seed` value for torch and numpy seeds. 
Also turns on\n deterministic execution for cudnn.\n \n Parameters:\n - seed: A hashable seed value\n \"\"\"\n random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n print(f\"Seed set to: {seed} (type: {type(seed)})\")" }, { "identifier": "save_params_ckpt", "path": "utils/common_utils.py", "snippet": "def save_params_ckpt(output_params, output_dir, time_idx):\n # Convert to CPU Numpy Arrays\n to_save = params2cpu(output_params)\n # Save the Parameters containing the Gaussian Trajectories\n os.makedirs(output_dir, exist_ok=True)\n print(f\"Saving parameters to: {output_dir}\")\n save_path = os.path.join(output_dir, \"params\"+str(time_idx)+\".npz\")\n np.savez(save_path, **to_save)" }, { "identifier": "save_params", "path": "utils/common_utils.py", "snippet": "def save_params(output_params, output_dir):\n # Convert to CPU Numpy Arrays\n to_save = params2cpu(output_params)\n # Save the Parameters containing the Gaussian Trajectories\n os.makedirs(output_dir, exist_ok=True)\n print(f\"Saving parameters to: {output_dir}\")\n save_path = os.path.join(output_dir, \"params.npz\")\n np.savez(save_path, **to_save)" }, { "identifier": "report_progress", "path": "utils/eval_helpers.py", "snippet": "def report_progress(params, data, i, progress_bar, iter_time_idx, sil_thres, every_i=1, qual_every_i=1, \n tracking=False, mapping=False, wandb_run=None, wandb_step=None, wandb_save_qual=False, online_time_idx=None,\n global_logging=True):\n if i % every_i == 0 or i == 1:\n if wandb_run is not None:\n if tracking:\n stage = \"Tracking\"\n elif mapping:\n stage = \"Mapping\"\n else:\n stage = \"Current Frame Optimization\"\n if not global_logging:\n stage = \"Per Iteration \" + stage\n\n if tracking:\n # Get list of gt poses\n gt_w2c_list = data['iter_gt_w2c_list']\n valid_gt_w2c_list = []\n \n # Get latest trajectory\n latest_est_w2c = data['w2c']\n latest_est_w2c_list = []\n latest_est_w2c_list.append(latest_est_w2c)\n valid_gt_w2c_list.append(gt_w2c_list[0])\n for idx in range(1, iter_time_idx+1):\n # Check if gt pose is not nan for this time step\n if torch.isnan(gt_w2c_list[idx]).sum() > 0:\n continue\n interm_cam_rot = F.normalize(params['cam_unnorm_rots'][..., idx].detach())\n interm_cam_trans = params['cam_trans'][..., idx].detach()\n intermrel_w2c = torch.eye(4).cuda().float()\n intermrel_w2c[:3, :3] = build_rotation(interm_cam_rot)\n intermrel_w2c[:3, 3] = interm_cam_trans\n latest_est_w2c = intermrel_w2c\n latest_est_w2c_list.append(latest_est_w2c)\n valid_gt_w2c_list.append(gt_w2c_list[idx])\n\n # Get latest gt pose\n gt_w2c_list = valid_gt_w2c_list\n iter_gt_w2c = gt_w2c_list[-1]\n # Get euclidean distance error between latest and gt pose\n iter_pt_error = torch.sqrt((latest_est_w2c[0,3] - iter_gt_w2c[0,3])**2 + (latest_est_w2c[1,3] - iter_gt_w2c[1,3])**2 + (latest_est_w2c[2,3] - iter_gt_w2c[2,3])**2)\n if iter_time_idx > 0:\n # Calculate relative pose error\n rel_gt_w2c = relative_transformation(gt_w2c_list[-2], gt_w2c_list[-1])\n rel_est_w2c = relative_transformation(latest_est_w2c_list[-2], latest_est_w2c_list[-1])\n rel_pt_error = torch.sqrt((rel_gt_w2c[0,3] - rel_est_w2c[0,3])**2 + (rel_gt_w2c[1,3] - rel_est_w2c[1,3])**2 + (rel_gt_w2c[2,3] - rel_est_w2c[2,3])**2)\n else:\n rel_pt_error = torch.zeros(1).float()\n \n # Calculate ATE RMSE\n ate_rmse = evaluate_ate(gt_w2c_list, latest_est_w2c_list)\n ate_rmse = np.round(ate_rmse, decimals=6)\n if 
wandb_run is not None:\n tracking_log = {f\"{stage}/Latest Pose Error\":iter_pt_error, \n f\"{stage}/Latest Relative Pose Error\":rel_pt_error,\n f\"{stage}/ATE RMSE\":ate_rmse}\n\n # Get current frame Gaussians\n transformed_pts = transform_to_frame(params, iter_time_idx, \n gaussians_grad=False,\n camera_grad=False)\n\n # Initialize Render Variables\n rendervar = transformed_params2rendervar(params, transformed_pts)\n depth_sil_rendervar = transformed_params2depthplussilhouette(params, data['w2c'], \n transformed_pts)\n depth_sil, _, _, = Renderer(raster_settings=data['cam'])(**depth_sil_rendervar)\n rastered_depth = depth_sil[0, :, :].unsqueeze(0)\n valid_depth_mask = (data['depth'] > 0)\n silhouette = depth_sil[1, :, :]\n presence_sil_mask = (silhouette > sil_thres)\n\n im, _, _, = Renderer(raster_settings=data['cam'])(**rendervar)\n if tracking:\n psnr = calc_psnr(im * presence_sil_mask, data['im'] * presence_sil_mask).mean()\n else:\n psnr = calc_psnr(im, data['im']).mean()\n\n if tracking:\n diff_depth_rmse = torch.sqrt((((rastered_depth - data['depth']) * presence_sil_mask) ** 2))\n diff_depth_rmse = diff_depth_rmse * valid_depth_mask\n rmse = diff_depth_rmse.sum() / valid_depth_mask.sum()\n diff_depth_l1 = torch.abs((rastered_depth - data['depth']) * presence_sil_mask)\n diff_depth_l1 = diff_depth_l1 * valid_depth_mask\n depth_l1 = diff_depth_l1.sum() / valid_depth_mask.sum()\n else:\n diff_depth_rmse = torch.sqrt((((rastered_depth - data['depth'])) ** 2))\n diff_depth_rmse = diff_depth_rmse * valid_depth_mask\n rmse = diff_depth_rmse.sum() / valid_depth_mask.sum()\n diff_depth_l1 = torch.abs((rastered_depth - data['depth']))\n diff_depth_l1 = diff_depth_l1 * valid_depth_mask\n depth_l1 = diff_depth_l1.sum() / valid_depth_mask.sum()\n\n if not (tracking or mapping):\n progress_bar.set_postfix({f\"Time-Step: {iter_time_idx} | PSNR: {psnr:.{7}} | Depth RMSE: {rmse:.{7}} | L1\": f\"{depth_l1:.{7}}\"})\n progress_bar.update(every_i)\n elif tracking:\n progress_bar.set_postfix({f\"Time-Step: {iter_time_idx} | Rel Pose Error: {rel_pt_error.item():.{7}} | Pose Error: {iter_pt_error.item():.{7}} | ATE RMSE\": f\"{ate_rmse.item():.{7}}\"})\n progress_bar.update(every_i)\n elif mapping:\n progress_bar.set_postfix({f\"Time-Step: {online_time_idx} | Frame {data['id']} | PSNR: {psnr:.{7}} | Depth RMSE: {rmse:.{7}} | L1\": f\"{depth_l1:.{7}}\"})\n progress_bar.update(every_i)\n \n if wandb_run is not None:\n wandb_log = {f\"{stage}/PSNR\": psnr,\n f\"{stage}/Depth RMSE\": rmse,\n f\"{stage}/Depth L1\": depth_l1,\n f\"{stage}/step\": wandb_step}\n if tracking:\n wandb_log = {**wandb_log, **tracking_log}\n wandb_run.log(wandb_log)\n \n if wandb_save_qual and (i % qual_every_i == 0 or i == 1):\n # Silhouette Mask\n presence_sil_mask = presence_sil_mask.detach().cpu().numpy()\n\n # Log plot to wandb\n if not mapping:\n fig_title = f\"Time-Step: {iter_time_idx} | Iter: {i} | Frame: {data['id']}\"\n else:\n fig_title = f\"Time-Step: {online_time_idx} | Iter: {i} | Frame: {data['id']}\"\n plot_rgbd_silhouette(data['im'], data['depth'], im, rastered_depth, presence_sil_mask, diff_depth_l1,\n psnr, depth_l1, fig_title, wandb_run=wandb_run, wandb_step=wandb_step, \n wandb_title=f\"{stage} Qual Viz\")" }, { "identifier": "keyframe_selection_overlap", "path": "utils/keyframe_selection.py", "snippet": "def keyframe_selection_overlap(gt_depth, w2c, intrinsics, keyframe_list, k, pixels=1600):\n \"\"\"\n Select overlapping keyframes to the current camera observation.\n\n Args:\n gt_depth (tensor): ground truth 
depth image of the current frame.\n w2c (tensor): world to camera matrix (4 x 4).\n keyframe_list (list): a list containing info for each keyframe.\n k (int): number of overlapping keyframes to select.\n pixels (int, optional): number of pixels to sparsely sample \n from the image of the current camera. Defaults to 1600.\n Returns:\n selected_keyframe_list (list): list of selected keyframe id.\n \"\"\"\n # Radomly Sample Pixel Indices from valid depth pixels\n width, height = gt_depth.shape[2], gt_depth.shape[1]\n valid_depth_indices = torch.where(gt_depth[0] > 0)\n valid_depth_indices = torch.stack(valid_depth_indices, dim=1)\n indices = torch.randint(valid_depth_indices.shape[0], (pixels,))\n sampled_indices = valid_depth_indices[indices]\n\n # Back Project the selected pixels to 3D Pointcloud\n pts = get_pointcloud(gt_depth, intrinsics, w2c, sampled_indices)\n\n list_keyframe = []\n for keyframeid, keyframe in enumerate(keyframe_list):\n # Get the estimated world2cam of the keyframe\n est_w2c = keyframe['est_w2c']\n # Transform the 3D pointcloud to the keyframe's camera space\n pts4 = torch.cat([pts, torch.ones_like(pts[:, :1])], dim=1)\n transformed_pts = (est_w2c @ pts4.T).T[:, :3]\n # Project the 3D pointcloud to the keyframe's image space\n points_2d = torch.matmul(intrinsics, transformed_pts.transpose(0, 1))\n points_2d = points_2d.transpose(0, 1)\n points_z = points_2d[:, 2:] + 1e-5\n points_2d = points_2d / points_z\n projected_pts = points_2d[:, :2]\n # Filter out the points that are outside the image\n edge = 20\n mask = (projected_pts[:, 0] < width-edge)*(projected_pts[:, 0] > edge) * \\\n (projected_pts[:, 1] < height-edge)*(projected_pts[:, 1] > edge)\n mask = mask & (points_z[:, 0] > 0)\n # Compute the percentage of points that are inside the image\n percent_inside = mask.sum()/projected_pts.shape[0]\n list_keyframe.append(\n {'id': keyframeid, 'percent_inside': percent_inside})\n\n # Sort the keyframes based on the percentage of points that are inside the image\n list_keyframe = sorted(\n list_keyframe, key=lambda i: i['percent_inside'], reverse=True)\n # Select the keyframes with percentage of points inside the image > 0\n selected_keyframe_list = [keyframe_dict['id']\n for keyframe_dict in list_keyframe if keyframe_dict['percent_inside'] > 0.0]\n selected_keyframe_list = list(np.random.permutation(\n np.array(selected_keyframe_list))[:k])\n\n return selected_keyframe_list" }, { "identifier": "setup_camera", "path": "utils/recon_helpers.py", "snippet": "def setup_camera(w, h, k, w2c, near=0.01, far=100):\n fx, fy, cx, cy = k[0][0], k[1][1], k[0][2], k[1][2]\n w2c = torch.tensor(w2c).cuda().float()\n cam_center = torch.inverse(w2c)[:3, 3]\n w2c = w2c.unsqueeze(0).transpose(1, 2)\n opengl_proj = torch.tensor([[2 * fx / w, 0.0, -(w - 2 * cx) / w, 0.0],\n [0.0, 2 * fy / h, -(h - 2 * cy) / h, 0.0],\n [0.0, 0.0, far / (far - near), -(far * near) / (far - near)],\n [0.0, 0.0, 1.0, 0.0]]).cuda().float().unsqueeze(0).transpose(1, 2)\n full_proj = w2c.bmm(opengl_proj)\n cam = Camera(\n image_height=h,\n image_width=w,\n tanfovx=w / (2 * fx),\n tanfovy=h / (2 * fy),\n bg=torch.tensor([0, 0, 0], dtype=torch.float32, device=\"cuda\"),\n scale_modifier=1.0,\n viewmatrix=w2c,\n projmatrix=full_proj,\n sh_degree=0,\n campos=cam_center,\n prefiltered=False\n )\n return cam" }, { "identifier": "build_rotation", "path": "utils/slam_external.py", "snippet": "def build_rotation(q):\n norm = torch.sqrt(q[:, 0] * q[:, 0] + q[:, 1] * q[:, 1] + q[:, 2] * q[:, 2] + q[:, 3] * q[:, 3])\n q = q / 
norm[:, None]\n rot = torch.zeros((q.size(0), 3, 3), device='cuda')\n r = q[:, 0]\n x = q[:, 1]\n y = q[:, 2]\n z = q[:, 3]\n rot[:, 0, 0] = 1 - 2 * (y * y + z * z)\n rot[:, 0, 1] = 2 * (x * y - r * z)\n rot[:, 0, 2] = 2 * (x * z + r * y)\n rot[:, 1, 0] = 2 * (x * y + r * z)\n rot[:, 1, 1] = 1 - 2 * (x * x + z * z)\n rot[:, 1, 2] = 2 * (y * z - r * x)\n rot[:, 2, 0] = 2 * (x * z - r * y)\n rot[:, 2, 1] = 2 * (y * z + r * x)\n rot[:, 2, 2] = 1 - 2 * (x * x + y * y)\n return rot" }, { "identifier": "prune_gaussians", "path": "utils/slam_external.py", "snippet": "def prune_gaussians(params, variables, optimizer, iter, prune_dict):\n if iter <= prune_dict['stop_after']:\n if (iter >= prune_dict['start_after']) and (iter % prune_dict['prune_every'] == 0):\n if iter == prune_dict['stop_after']:\n remove_threshold = prune_dict['final_removal_opacity_threshold']\n else:\n remove_threshold = prune_dict['removal_opacity_threshold']\n # Remove Gaussians with low opacity\n to_remove = (torch.sigmoid(params['logit_opacities']) < remove_threshold).squeeze()\n # Remove Gaussians that are too big\n if iter >= prune_dict['remove_big_after']:\n big_points_ws = torch.exp(params['log_scales']).max(dim=1).values > 0.1 * variables['scene_radius']\n to_remove = torch.logical_or(to_remove, big_points_ws)\n params, variables = remove_points(to_remove, params, variables, optimizer)\n torch.cuda.empty_cache()\n \n # Reset Opacities for all Gaussians\n if iter > 0 and iter % prune_dict['reset_opacities_every'] == 0 and prune_dict['reset_opacities']:\n new_params = {'logit_opacities': inverse_sigmoid(torch.ones_like(params['logit_opacities']) * 0.01)}\n params = update_params_and_optimizer(new_params, params, optimizer)\n \n return params, variables" }, { "identifier": "densify", "path": "utils/slam_external.py", "snippet": "def densify(params, variables, optimizer, iter, densify_dict):\n if iter <= densify_dict['stop_after']:\n variables = accumulate_mean2d_gradient(variables)\n grad_thresh = densify_dict['grad_thresh']\n if (iter >= densify_dict['start_after']) and (iter % densify_dict['densify_every'] == 0):\n grads = variables['means2D_gradient_accum'] / variables['denom']\n grads[grads.isnan()] = 0.0\n to_clone = torch.logical_and(grads >= grad_thresh, (\n torch.max(torch.exp(params['log_scales']), dim=1).values <= 0.01 * variables['scene_radius']))\n new_params = {k: v[to_clone] for k, v in params.items() if k not in ['cam_unnorm_rots', 'cam_trans']}\n params = cat_params_to_optimizer(new_params, params, optimizer)\n num_pts = params['means3D'].shape[0]\n\n padded_grad = torch.zeros(num_pts, device=\"cuda\")\n padded_grad[:grads.shape[0]] = grads\n to_split = torch.logical_and(padded_grad >= grad_thresh,\n torch.max(torch.exp(params['log_scales']), dim=1).values > 0.01 * variables[\n 'scene_radius'])\n n = densify_dict['num_to_split_into'] # number to split into\n new_params = {k: v[to_split].repeat(n, 1) for k, v in params.items() if k not in ['cam_unnorm_rots', 'cam_trans']}\n stds = torch.exp(params['log_scales'])[to_split].repeat(n, 3)\n means = torch.zeros((stds.size(0), 3), device=\"cuda\")\n samples = torch.normal(mean=means, std=stds)\n rots = build_rotation(params['unnorm_rotations'][to_split]).repeat(n, 1, 1)\n new_params['means3D'] += torch.bmm(rots, samples.unsqueeze(-1)).squeeze(-1)\n new_params['log_scales'] = torch.log(torch.exp(new_params['log_scales']) / (0.8 * n))\n params = cat_params_to_optimizer(new_params, params, optimizer)\n num_pts = params['means3D'].shape[0]\n\n 
variables['means2D_gradient_accum'] = torch.zeros(num_pts, device=\"cuda\")\n variables['denom'] = torch.zeros(num_pts, device=\"cuda\")\n variables['max_2D_radius'] = torch.zeros(num_pts, device=\"cuda\")\n to_remove = torch.cat((to_split, torch.zeros(n * to_split.sum(), dtype=torch.bool, device=\"cuda\")))\n params, variables = remove_points(to_remove, params, variables, optimizer)\n\n if iter == densify_dict['stop_after']:\n remove_threshold = densify_dict['final_removal_opacity_threshold']\n else:\n remove_threshold = densify_dict['removal_opacity_threshold']\n to_remove = (torch.sigmoid(params['logit_opacities']) < remove_threshold).squeeze()\n if iter >= densify_dict['remove_big_after']:\n big_points_ws = torch.exp(params['log_scales']).max(dim=1).values > 0.1 * variables['scene_radius']\n to_remove = torch.logical_or(to_remove, big_points_ws)\n params, variables = remove_points(to_remove, params, variables, optimizer)\n\n torch.cuda.empty_cache()\n\n # Reset Opacities for all Gaussians (This is not desired for mapping on only current frame)\n if iter > 0 and iter % densify_dict['reset_opacities_every'] == 0 and densify_dict['reset_opacities']:\n new_params = {'logit_opacities': inverse_sigmoid(torch.ones_like(params['logit_opacities']) * 0.01)}\n params = update_params_and_optimizer(new_params, params, optimizer)\n\n return params, variables" }, { "identifier": "get_loss", "path": "scripts/splatam.py", "snippet": "def get_loss(params, curr_data, variables, iter_time_idx, loss_weights, use_sil_for_loss,\n sil_thres, use_l1,ignore_outlier_depth_loss, tracking=False, \n mapping=False, do_ba=False, plot_dir=None, visualize_tracking_loss=False, tracking_iteration=None):\n # Initialize Loss Dictionary\n losses = {}\n\n if tracking:\n # Get current frame Gaussians, where only the camera pose gets gradient\n transformed_pts = transform_to_frame(params, iter_time_idx, \n gaussians_grad=False,\n camera_grad=True)\n elif mapping:\n if do_ba:\n # Get current frame Gaussians, where both camera pose and Gaussians get gradient\n transformed_pts = transform_to_frame(params, iter_time_idx,\n gaussians_grad=True,\n camera_grad=True)\n else:\n # Get current frame Gaussians, where only the Gaussians get gradient\n transformed_pts = transform_to_frame(params, iter_time_idx,\n gaussians_grad=True,\n camera_grad=False)\n else:\n # Get current frame Gaussians, where only the Gaussians get gradient\n transformed_pts = transform_to_frame(params, iter_time_idx,\n gaussians_grad=True,\n camera_grad=False)\n\n # Initialize Render Variables\n rendervar = transformed_params2rendervar(params, transformed_pts)\n depth_sil_rendervar = transformed_params2depthplussilhouette(params, curr_data['w2c'],\n transformed_pts)\n\n # RGB Rendering\n rendervar['means2D'].retain_grad()\n im, radius, _, = Renderer(raster_settings=curr_data['cam'])(**rendervar)\n variables['means2D'] = rendervar['means2D'] # Gradient only accum from colour render for densification\n\n # Depth & Silhouette Rendering\n depth_sil, _, _, = Renderer(raster_settings=curr_data['cam'])(**depth_sil_rendervar)\n depth = depth_sil[0, :, :].unsqueeze(0)\n silhouette = depth_sil[1, :, :]\n presence_sil_mask = (silhouette > sil_thres)\n depth_sq = depth_sil[2, :, :].unsqueeze(0)\n uncertainty = depth_sq - depth**2\n uncertainty = uncertainty.detach()\n\n # Mask with valid depth values (accounts for outlier depth values)\n nan_mask = (~torch.isnan(depth)) & (~torch.isnan(uncertainty))\n if ignore_outlier_depth_loss:\n depth_error = torch.abs(curr_data['depth'] - 
depth) * (curr_data['depth'] > 0)\n mask = (depth_error < 10*depth_error.median())\n mask = mask & (curr_data['depth'] > 0)\n else:\n mask = (curr_data['depth'] > 0)\n mask = mask & nan_mask\n # Mask with presence silhouette mask (accounts for empty space)\n if tracking and use_sil_for_loss:\n mask = mask & presence_sil_mask\n\n # Depth loss\n if use_l1:\n mask = mask.detach()\n if tracking:\n losses['depth'] = torch.abs(curr_data['depth'] - depth)[mask].sum()\n else:\n losses['depth'] = torch.abs(curr_data['depth'] - depth)[mask].mean()\n \n # RGB Loss\n if tracking and (use_sil_for_loss or ignore_outlier_depth_loss):\n color_mask = torch.tile(mask, (3, 1, 1))\n color_mask = color_mask.detach()\n losses['im'] = torch.abs(curr_data['im'] - im)[color_mask].sum()\n elif tracking:\n losses['im'] = torch.abs(curr_data['im'] - im).sum()\n else:\n losses['im'] = 0.8 * l1_loss_v1(im, curr_data['im']) + 0.2 * (1.0 - calc_ssim(im, curr_data['im']))\n\n # Visualize the Diff Images\n if tracking and visualize_tracking_loss:\n fig, ax = plt.subplots(2, 4, figsize=(12, 6))\n weighted_render_im = im * color_mask\n weighted_im = curr_data['im'] * color_mask\n weighted_render_depth = depth * mask\n weighted_depth = curr_data['depth'] * mask\n diff_rgb = torch.abs(weighted_render_im - weighted_im).mean(dim=0).detach().cpu()\n diff_depth = torch.abs(weighted_render_depth - weighted_depth).mean(dim=0).detach().cpu()\n viz_img = torch.clip(weighted_im.permute(1, 2, 0).detach().cpu(), 0, 1)\n ax[0, 0].imshow(viz_img)\n ax[0, 0].set_title(\"Weighted GT RGB\")\n viz_render_img = torch.clip(weighted_render_im.permute(1, 2, 0).detach().cpu(), 0, 1)\n ax[1, 0].imshow(viz_render_img)\n ax[1, 0].set_title(\"Weighted Rendered RGB\")\n ax[0, 1].imshow(weighted_depth[0].detach().cpu(), cmap=\"jet\", vmin=0, vmax=6)\n ax[0, 1].set_title(\"Weighted GT Depth\")\n ax[1, 1].imshow(weighted_render_depth[0].detach().cpu(), cmap=\"jet\", vmin=0, vmax=6)\n ax[1, 1].set_title(\"Weighted Rendered Depth\")\n ax[0, 2].imshow(diff_rgb, cmap=\"jet\", vmin=0, vmax=0.8)\n ax[0, 2].set_title(f\"Diff RGB, Loss: {torch.round(losses['im'])}\")\n ax[1, 2].imshow(diff_depth, cmap=\"jet\", vmin=0, vmax=0.8)\n ax[1, 2].set_title(f\"Diff Depth, Loss: {torch.round(losses['depth'])}\")\n ax[0, 3].imshow(presence_sil_mask.detach().cpu(), cmap=\"gray\")\n ax[0, 3].set_title(\"Silhouette Mask\")\n ax[1, 3].imshow(mask[0].detach().cpu(), cmap=\"gray\")\n ax[1, 3].set_title(\"Loss Mask\")\n # Turn off axis\n for i in range(2):\n for j in range(4):\n ax[i, j].axis('off')\n # Set Title\n fig.suptitle(f\"Tracking Iteration: {tracking_iteration}\", fontsize=16)\n # Figure Tight Layout\n fig.tight_layout()\n os.makedirs(plot_dir, exist_ok=True)\n plt.savefig(os.path.join(plot_dir, f\"tmp.png\"), bbox_inches='tight')\n plt.close()\n plot_img = cv2.imread(os.path.join(plot_dir, f\"tmp.png\"))\n cv2.imshow('Diff Images', plot_img)\n cv2.waitKey(1)\n ## Save Tracking Loss Viz\n # save_plot_dir = os.path.join(plot_dir, f\"tracking_%04d\" % iter_time_idx)\n # os.makedirs(save_plot_dir, exist_ok=True)\n # plt.savefig(os.path.join(save_plot_dir, f\"%04d.png\" % tracking_iteration), bbox_inches='tight')\n # plt.close()\n\n weighted_losses = {k: v * loss_weights[k] for k, v in losses.items()}\n loss = sum(weighted_losses.values())\n\n seen = radius > 0\n variables['max_2D_radius'][seen] = torch.max(radius[seen], variables['max_2D_radius'][seen])\n variables['seen'] = seen\n weighted_losses['loss'] = loss\n\n return loss, variables, weighted_losses" }, { "identifier": 
"initialize_optimizer", "path": "scripts/splatam.py", "snippet": "def initialize_optimizer(params, lrs_dict, tracking):\n lrs = lrs_dict\n param_groups = [{'params': [v], 'name': k, 'lr': lrs[k]} for k, v in params.items()]\n if tracking:\n return torch.optim.Adam(param_groups)\n else:\n return torch.optim.Adam(param_groups, lr=0.0, eps=1e-15)" }, { "identifier": "initialize_params", "path": "scripts/splatam.py", "snippet": "def initialize_params(init_pt_cld, num_frames, mean3_sq_dist):\n num_pts = init_pt_cld.shape[0]\n means3D = init_pt_cld[:, :3] # [num_gaussians, 3]\n unnorm_rots = np.tile([1, 0, 0, 0], (num_pts, 1)) # [num_gaussians, 3]\n logit_opacities = torch.zeros((num_pts, 1), dtype=torch.float, device=\"cuda\")\n params = {\n 'means3D': means3D,\n 'rgb_colors': init_pt_cld[:, 3:6],\n 'unnorm_rotations': unnorm_rots,\n 'logit_opacities': logit_opacities,\n 'log_scales': torch.tile(torch.log(torch.sqrt(mean3_sq_dist))[..., None], (1, 1)),\n }\n\n # Initialize a single gaussian trajectory to model the camera poses relative to the first frame\n cam_rots = np.tile([1, 0, 0, 0], (1, 1))\n cam_rots = np.tile(cam_rots[:, :, None], (1, 1, num_frames))\n params['cam_unnorm_rots'] = cam_rots\n params['cam_trans'] = np.zeros((1, 3, num_frames))\n\n for k, v in params.items():\n # Check if value is already a torch tensor\n if not isinstance(v, torch.Tensor):\n params[k] = torch.nn.Parameter(torch.tensor(v).cuda().float().contiguous().requires_grad_(True))\n else:\n params[k] = torch.nn.Parameter(v.cuda().float().contiguous().requires_grad_(True))\n\n variables = {'max_2D_radius': torch.zeros(params['means3D'].shape[0]).cuda().float(),\n 'means2D_gradient_accum': torch.zeros(params['means3D'].shape[0]).cuda().float(),\n 'denom': torch.zeros(params['means3D'].shape[0]).cuda().float(),\n 'timestep': torch.zeros(params['means3D'].shape[0]).cuda().float()}\n\n return params, variables" }, { "identifier": "initialize_camera_pose", "path": "scripts/splatam.py", "snippet": "def initialize_camera_pose(params, curr_time_idx, forward_prop):\n with torch.no_grad():\n if curr_time_idx > 1 and forward_prop:\n # Initialize the camera pose for the current frame based on a constant velocity model\n # Rotation\n prev_rot1 = F.normalize(params['cam_unnorm_rots'][..., curr_time_idx-1].detach())\n prev_rot2 = F.normalize(params['cam_unnorm_rots'][..., curr_time_idx-2].detach())\n new_rot = F.normalize(prev_rot1 + (prev_rot1 - prev_rot2))\n params['cam_unnorm_rots'][..., curr_time_idx] = new_rot.detach()\n # Translation\n prev_tran1 = params['cam_trans'][..., curr_time_idx-1].detach()\n prev_tran2 = params['cam_trans'][..., curr_time_idx-2].detach()\n new_tran = prev_tran1 + (prev_tran1 - prev_tran2)\n params['cam_trans'][..., curr_time_idx] = new_tran.detach()\n else:\n # Initialize the camera pose for the current frame\n params['cam_unnorm_rots'][..., curr_time_idx] = params['cam_unnorm_rots'][..., curr_time_idx-1].detach()\n params['cam_trans'][..., curr_time_idx] = params['cam_trans'][..., curr_time_idx-1].detach()\n \n return params" }, { "identifier": "get_pointcloud", "path": "scripts/splatam.py", "snippet": "def get_pointcloud(color, depth, intrinsics, w2c, transform_pts=True, \n mask=None, compute_mean_sq_dist=False, mean_sq_dist_method=\"projective\"):\n width, height = color.shape[2], color.shape[1]\n CX = intrinsics[0][2]\n CY = intrinsics[1][2]\n FX = intrinsics[0][0]\n FY = intrinsics[1][1]\n\n # Compute indices of pixels\n x_grid, y_grid = torch.meshgrid(torch.arange(width).cuda().float(), \n 
torch.arange(height).cuda().float(),\n indexing='xy')\n xx = (x_grid - CX)/FX\n yy = (y_grid - CY)/FY\n xx = xx.reshape(-1)\n yy = yy.reshape(-1)\n depth_z = depth[0].reshape(-1)\n\n # Initialize point cloud\n pts_cam = torch.stack((xx * depth_z, yy * depth_z, depth_z), dim=-1)\n if transform_pts:\n pix_ones = torch.ones(height * width, 1).cuda().float()\n pts4 = torch.cat((pts_cam, pix_ones), dim=1)\n c2w = torch.inverse(w2c)\n pts = (c2w @ pts4.T).T[:, :3]\n else:\n pts = pts_cam\n\n # Compute mean squared distance for initializing the scale of the Gaussians\n if compute_mean_sq_dist:\n if mean_sq_dist_method == \"projective\":\n # Projective Geometry (this is fast, farther -> larger radius)\n scale_gaussian = depth_z / ((FX + FY)/2)\n mean3_sq_dist = scale_gaussian**2\n else:\n raise ValueError(f\"Unknown mean_sq_dist_method {mean_sq_dist_method}\")\n \n # Colorize point cloud\n cols = torch.permute(color, (1, 2, 0)).reshape(-1, 3) # (C, H, W) -> (H, W, C) -> (H * W, C)\n point_cld = torch.cat((pts, cols), -1)\n\n # Select points based on mask\n if mask is not None:\n point_cld = point_cld[mask]\n if compute_mean_sq_dist:\n mean3_sq_dist = mean3_sq_dist[mask]\n\n if compute_mean_sq_dist:\n return point_cld, mean3_sq_dist\n else:\n return point_cld" }, { "identifier": "add_new_gaussians", "path": "scripts/splatam.py", "snippet": "def add_new_gaussians(params, variables, curr_data, sil_thres, time_idx, mean_sq_dist_method):\n # Silhouette Rendering\n transformed_pts = transform_to_frame(params, time_idx, gaussians_grad=False, camera_grad=False)\n depth_sil_rendervar = transformed_params2depthplussilhouette(params, curr_data['w2c'],\n transformed_pts)\n depth_sil, _, _, = Renderer(raster_settings=curr_data['cam'])(**depth_sil_rendervar)\n silhouette = depth_sil[1, :, :]\n non_presence_sil_mask = (silhouette < sil_thres)\n # Check for new foreground objects by using GT depth\n gt_depth = curr_data['depth'][0, :, :]\n render_depth = depth_sil[0, :, :]\n depth_error = torch.abs(gt_depth - render_depth) * (gt_depth > 0)\n non_presence_depth_mask = (render_depth > gt_depth) * (depth_error > 50*depth_error.median())\n # Determine non-presence mask\n non_presence_mask = non_presence_sil_mask | non_presence_depth_mask\n # Flatten mask\n non_presence_mask = non_presence_mask.reshape(-1)\n\n # Get the new frame Gaussians based on the Silhouette\n if torch.sum(non_presence_mask) > 0:\n # Get the new pointcloud in the world frame\n curr_cam_rot = torch.nn.functional.normalize(params['cam_unnorm_rots'][..., time_idx].detach())\n curr_cam_tran = params['cam_trans'][..., time_idx].detach()\n curr_w2c = torch.eye(4).cuda().float()\n curr_w2c[:3, :3] = build_rotation(curr_cam_rot)\n curr_w2c[:3, 3] = curr_cam_tran\n valid_depth_mask = (curr_data['depth'][0, :, :] > 0)\n non_presence_mask = non_presence_mask & valid_depth_mask.reshape(-1)\n new_pt_cld, mean3_sq_dist = get_pointcloud(curr_data['im'], curr_data['depth'], curr_data['intrinsics'], \n curr_w2c, mask=non_presence_mask, compute_mean_sq_dist=True,\n mean_sq_dist_method=mean_sq_dist_method)\n new_params = initialize_new_params(new_pt_cld, mean3_sq_dist)\n for k, v in new_params.items():\n params[k] = torch.nn.Parameter(torch.cat((params[k], v), dim=0).requires_grad_(True))\n num_pts = params['means3D'].shape[0]\n variables['means2D_gradient_accum'] = torch.zeros(num_pts, device=\"cuda\").float()\n variables['denom'] = torch.zeros(num_pts, device=\"cuda\").float()\n variables['max_2D_radius'] = torch.zeros(num_pts, device=\"cuda\").float()\n 
new_timestep = time_idx*torch.ones(new_pt_cld.shape[0],device=\"cuda\").float()\n variables['timestep'] = torch.cat((variables['timestep'],new_timestep),dim=0)\n\n return params, variables" } ]
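A note on the get_pointcloud snippet above: it back-projects every pixel through the pinhole intrinsics and, when compute_mean_sq_dist=True, seeds each Gaussian's scale with a projective heuristic so that (as the in-code comment says) farther points get larger radii. The following is a minimal NumPy sketch of that math for reference only; the helper name backproject_depth and the use of NumPy in place of CUDA tensors are illustrative assumptions, not part of the repository.

import numpy as np

# Hypothetical standalone helper mirroring the back-projection in get_pointcloud above.
def backproject_depth(depth, fx, fy, cx, cy):
    h, w = depth.shape
    xs, ys = np.meshgrid(np.arange(w), np.arange(h))  # pixel grid: xs = u, ys = v
    x = (xs - cx) / fx * depth                        # X = (u - cx) / fx * Z
    y = (ys - cy) / fy * depth                        # Y = (v - cy) / fy * Z
    pts_cam = np.stack((x, y, depth), axis=-1).reshape(-1, 3)
    # Projective scale heuristic: a pixel's footprint at depth Z is roughly Z / f,
    # so the squared initial scale is (Z / f_mean)^2 with f_mean = (fx + fy) / 2.
    mean_sq_dist = (depth / ((fx + fy) / 2.0)) ** 2
    return pts_cam, mean_sq_dist.reshape(-1)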
import argparse import os import shutil import sys import time import json import cv2 import matplotlib.pyplot as plt import numpy as np import torch import torch.nn.functional as F import cyclonedds.idl as idl import cyclonedds.idl.annotations as annotate import cyclonedds.idl.types as types from pathlib import Path from importlib.machinery import SourceFileLoader from tqdm import tqdm from datasets.gradslam_datasets.geometryutils import relative_transformation from utils.common_utils import seed_everything, save_params_ckpt, save_params from utils.eval_helpers import report_progress from utils.keyframe_selection import keyframe_selection_overlap from utils.recon_helpers import setup_camera from utils.slam_external import build_rotation, prune_gaussians, densify from scripts.splatam import get_loss, initialize_optimizer, initialize_params, initialize_camera_pose, get_pointcloud, add_new_gaussians from diff_gaussian_rasterization import GaussianRasterizer as Renderer from dataclasses import dataclass from cyclonedds.domain import DomainParticipant, Domain from cyclonedds.core import Qos, Policy from cyclonedds.sub import DataReader from cyclonedds.topic import Topic from cyclonedds.util import duration
12,930
# Depth if avaiable save_depth = None if sample.has_depth: # Save Depth Image save_depth = np.asarray(sample.depth_image, dtype=np.uint8).view( dtype=np.float32).reshape((sample.depth_height, sample.depth_width)) save_depth = (save_depth*65535/float(depth_scale)).astype(np.uint16) save_depth = cv2.resize(save_depth, dsize=( sample.width, sample.height), interpolation=cv2.INTER_NEAREST) cv2.imwrite(str(depth_dir.joinpath(f"{total_frames}.png")), save_depth) # Load Depth Image for SplaTAM curr_depth = np.asarray(sample.depth_image, dtype=np.uint8).view( dtype=np.float32).reshape((sample.depth_height, sample.depth_width)) else: print("No Depth Image Received. Please make sure that the NeRFCapture App \ mentions Depth Supported on the top right corner. Skipping Frame...") continue # ARKit Poses for saving dataset X_WV = np.asarray(sample.transform_matrix, dtype=np.float32).reshape((4, 4)).T frame = { "transform_matrix": X_WV.tolist(), "file_path": f"rgb/{total_frames}.png", "fl_x": sample.fl_x, "fl_y": sample.fl_y, "cx": sample.cx, "cy": sample.cy, "w": sample.width, "h": sample.height } if save_depth is not None: frame["depth_path"] = f"depth/{total_frames}.png" manifest["frames"].append(frame) # Convert ARKit Pose to GradSLAM format gt_pose = torch.from_numpy(X_WV).float() gt_pose = P @ gt_pose @ P.T if time_idx == 0: first_abs_gt_pose = gt_pose gt_pose = relative_transformation(first_abs_gt_pose.unsqueeze(0), gt_pose.unsqueeze(0), orthogonal_rotations=False) gt_w2c = torch.linalg.inv(gt_pose[0]) gt_w2c_all_frames.append(gt_w2c) # Initialize Tracking & Mapping Resolution Data color = cv2.resize(image, dsize=( config['data']['desired_image_width'], config['data']['desired_image_height']), interpolation=cv2.INTER_LINEAR) depth = cv2.resize(curr_depth, dsize=( config['data']['desired_image_width'], config['data']['desired_image_height']), interpolation=cv2.INTER_NEAREST) depth = np.expand_dims(depth, -1) color = torch.from_numpy(color).cuda().float() color = color.permute(2, 0, 1) / 255 depth = torch.from_numpy(depth).cuda().float() depth = depth.permute(2, 0, 1) if time_idx == 0: intrinsics = torch.tensor([[sample.fl_x, 0, sample.cx], [0, sample.fl_y, sample.cy], [0, 0, 1]]).cuda().float() intrinsics = intrinsics / config['data']['downscale_factor'] intrinsics[2, 2] = 1.0 first_frame_w2c = torch.eye(4).cuda().float() cam = setup_camera(color.shape[2], color.shape[1], intrinsics.cpu().numpy(), first_frame_w2c.cpu().numpy()) # Initialize Densification Resolution Data densify_color = cv2.resize(image, dsize=( config['data']['densification_image_width'], config['data']['densification_image_height']), interpolation=cv2.INTER_LINEAR) densify_depth = cv2.resize(curr_depth, dsize=( config['data']['densification_image_width'], config['data']['densification_image_height']), interpolation=cv2.INTER_NEAREST) densify_depth = np.expand_dims(densify_depth, -1) densify_color = torch.from_numpy(densify_color).cuda().float() densify_color = densify_color.permute(2, 0, 1) / 255 densify_depth = torch.from_numpy(densify_depth).cuda().float() densify_depth = densify_depth.permute(2, 0, 1) if time_idx == 0: densify_intrinsics = torch.tensor([[sample.fl_x, 0, sample.cx], [0, sample.fl_y, sample.cy], [0, 0, 1]]).cuda().float() densify_intrinsics = densify_intrinsics / config['data']['densify_downscale_factor'] densify_intrinsics[2, 2] = 1.0 densify_cam = setup_camera(densify_color.shape[2], densify_color.shape[1], densify_intrinsics.cpu().numpy(), first_frame_w2c.cpu().numpy()) # Initialize Params for first time step 
if time_idx == 0: # Get Initial Point Cloud mask = (densify_depth > 0) # Mask out invalid depth values mask = mask.reshape(-1) init_pt_cld, mean3_sq_dist = get_pointcloud(densify_color, densify_depth, densify_intrinsics, first_frame_w2c, mask=mask, compute_mean_sq_dist=True, mean_sq_dist_method=config['mean_sq_dist_method']) params, variables = initialize_params(init_pt_cld, num_frames, mean3_sq_dist) variables['scene_radius'] = torch.max(densify_depth)/config['scene_radius_depth_ratio'] # Initialize Mapping & Tracking for current frame iter_time_idx = time_idx curr_gt_w2c = gt_w2c_all_frames curr_data = {'cam': cam, 'im': color, 'depth':depth, 'id': iter_time_idx, 'intrinsics': intrinsics, 'w2c': first_frame_w2c, 'iter_gt_w2c_list': curr_gt_w2c} tracking_curr_data = curr_data # Optimization Iterations num_iters_mapping = config['mapping']['num_iters'] # Initialize the camera pose for the current frame if time_idx > 0: params = initialize_camera_pose(params, time_idx, forward_prop=config['tracking']['forward_prop']) # Tracking tracking_start_time = time.time() if time_idx > 0 and not config['tracking']['use_gt_poses']: # Reset Optimizer & Learning Rates for tracking optimizer = initialize_optimizer(params, config['tracking']['lrs'], tracking=True) # Keep Track of Best Candidate Rotation & Translation candidate_cam_unnorm_rot = params['cam_unnorm_rots'][..., time_idx].detach().clone() candidate_cam_tran = params['cam_trans'][..., time_idx].detach().clone() current_min_loss = float(1e20) # Tracking Optimization iter = 0 do_continue_slam = False num_iters_tracking = config['tracking']['num_iters'] progress_bar = tqdm(range(num_iters_tracking), desc=f"Tracking Time Step: {time_idx}") while True: iter_start_time = time.time() # Loss for current frame
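Two steps in the cropped code above lean on helpers defined in the context snippets: initialize_camera_pose seeds the new frame's pose with a constant-velocity model (linear extrapolation from the two previous frames) whenever forward_prop is enabled in the tracking config, and the tracking loop then refines that guess against the silhouette-masked losses. A toy sketch of just the extrapolation step follows; propagate_pose is a hypothetical name and the tensors are plain, whereas the real code mutates nn.Parameter slices under torch.no_grad().

import torch
import torch.nn.functional as F

def propagate_pose(cam_rots, cam_trans, t):
    # cam_rots: (1, 4, T) unnormalized quaternions, cam_trans: (1, 3, T) translations
    r1 = F.normalize(cam_rots[..., t - 1])            # pose at t-1
    r2 = F.normalize(cam_rots[..., t - 2])            # pose at t-2
    cam_rots[..., t] = F.normalize(r1 + (r1 - r2))    # linear extrapolation in quaternion space
    cam_trans[..., t] = cam_trans[..., t - 1] + (cam_trans[..., t - 1] - cam_trans[..., t - 2])
    return cam_rots, cam_trans

For example, propagate_pose(torch.randn(1, 4, 8), torch.zeros(1, 3, 8), 2) fills slot 2 from slots 0 and 1.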
""" Script to stream RGB-D data from the NeRFCapture iOS App & build a Gaussian Splat on the fly using SplaTAM. The CycloneDDS parts of this script are adapted from the Instant-NGP Repo: https://github.com/NVlabs/instant-ngp/blob/master/scripts/nerfcapture2nerf.py """ #!/usr/bin/env python3 _BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, _BASE_DIR) def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--config", default="./configs/iphone/online_demo.py", type=str, help="Path to config file.") return parser.parse_args() # DDS # ================================================================================================== @dataclass @annotate.final @annotate.autoid("sequential") class SplatCaptureFrame(idl.IdlStruct, typename="SplatCaptureData.SplatCaptureFrame"): id: types.uint32 annotate.key("id") timestamp: types.float64 fl_x: types.float32 fl_y: types.float32 cx: types.float32 cy: types.float32 transform_matrix: types.array[types.float32, 16] width: types.uint32 height: types.uint32 image: types.sequence[types.uint8] has_depth: bool depth_width: types.uint32 depth_height: types.uint32 depth_scale: types.float32 depth_image: types.sequence[types.uint8] dds_config = """<?xml version="1.0" encoding="UTF-8" ?> \ <CycloneDDS xmlns="https://cdds.io/config" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://cdds.io/config https://raw.githubusercontent.com/eclipse-cyclonedds/cyclonedds/master/etc/cyclonedds.xsd"> \ <Domain id="any"> \ <Internal> \ <MinimumSocketReceiveBufferSize>10MB</MinimumSocketReceiveBufferSize> \ </Internal> \ <Tracing> \ <Verbosity>config</Verbosity> \ <OutputFile>stdout</OutputFile> \ </Tracing> \ </Domain> \ </CycloneDDS> \ """ # ================================================================================================== def dataset_capture_loop(reader: DataReader, save_path: Path, overwrite: bool, n_frames: int, depth_scale: float, config: dict): rgb_path = save_path.joinpath("rgb") if rgb_path.exists(): if overwrite: # Prompt user to confirm deletion if (input(f"warning! folder '{save_path}' will be deleted/replaced. continue? (Y/n)").lower().strip()+"y")[:1] != "y": sys.exit(1) shutil.rmtree(save_path) else: print(f"rgb_path {rgb_path} already exists. 
Please use overwrite=True in config if you want to overwrite.") sys.exit(1) print("Waiting for frames...") # Make directory images_dir = save_path.joinpath("rgb") manifest = { "fl_x": 0.0, "fl_y": 0.0, "cx": 0.0, "cy": 0.0, "w": 0.0, "h": 0.0, "frames": [] } total_frames = 0 # Total frames received time_idx = total_frames num_frames = n_frames # Total frames desired # Initialize list to keep track of Keyframes keyframe_list = [] keyframe_time_indices = [] # Init Variables to keep track of ARkit poses and runtimes gt_w2c_all_frames = [] tracking_iter_time_sum = 0 tracking_iter_time_count = 0 mapping_iter_time_sum = 0 mapping_iter_time_count = 0 tracking_frame_time_sum = 0 tracking_frame_time_count = 0 mapping_frame_time_sum = 0 mapping_frame_time_count = 0 P = torch.tensor( [ [1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1] ] ).float() # Start DDS Loop while True: sample = reader.read_next() # Get frame from NeRFCapture if sample: print(f"{total_frames + 1}/{n_frames} frames received") if total_frames == 0: save_path.mkdir(parents=True, exist_ok=True) images_dir.mkdir(exist_ok=True) manifest["w"] = sample.width manifest["h"] = sample.height manifest["cx"] = sample.cx manifest["cy"] = sample.cy manifest["fl_x"] = sample.fl_x manifest["fl_y"] = sample.fl_y manifest["integer_depth_scale"] = float(depth_scale)/65535.0 if sample.has_depth: depth_dir = save_path.joinpath("depth") depth_dir.mkdir(exist_ok=True) # RGB image = np.asarray(sample.image, dtype=np.uint8).reshape((sample.height, sample.width, 3)) cv2.imwrite(str(images_dir.joinpath(f"{total_frames}.png")), cv2.cvtColor(image, cv2.COLOR_RGB2BGR)) # Depth if avaiable save_depth = None if sample.has_depth: # Save Depth Image save_depth = np.asarray(sample.depth_image, dtype=np.uint8).view( dtype=np.float32).reshape((sample.depth_height, sample.depth_width)) save_depth = (save_depth*65535/float(depth_scale)).astype(np.uint16) save_depth = cv2.resize(save_depth, dsize=( sample.width, sample.height), interpolation=cv2.INTER_NEAREST) cv2.imwrite(str(depth_dir.joinpath(f"{total_frames}.png")), save_depth) # Load Depth Image for SplaTAM curr_depth = np.asarray(sample.depth_image, dtype=np.uint8).view( dtype=np.float32).reshape((sample.depth_height, sample.depth_width)) else: print("No Depth Image Received. Please make sure that the NeRFCapture App \ mentions Depth Supported on the top right corner. 
Skipping Frame...") continue # ARKit Poses for saving dataset X_WV = np.asarray(sample.transform_matrix, dtype=np.float32).reshape((4, 4)).T frame = { "transform_matrix": X_WV.tolist(), "file_path": f"rgb/{total_frames}.png", "fl_x": sample.fl_x, "fl_y": sample.fl_y, "cx": sample.cx, "cy": sample.cy, "w": sample.width, "h": sample.height } if save_depth is not None: frame["depth_path"] = f"depth/{total_frames}.png" manifest["frames"].append(frame) # Convert ARKit Pose to GradSLAM format gt_pose = torch.from_numpy(X_WV).float() gt_pose = P @ gt_pose @ P.T if time_idx == 0: first_abs_gt_pose = gt_pose gt_pose = relative_transformation(first_abs_gt_pose.unsqueeze(0), gt_pose.unsqueeze(0), orthogonal_rotations=False) gt_w2c = torch.linalg.inv(gt_pose[0]) gt_w2c_all_frames.append(gt_w2c) # Initialize Tracking & Mapping Resolution Data color = cv2.resize(image, dsize=( config['data']['desired_image_width'], config['data']['desired_image_height']), interpolation=cv2.INTER_LINEAR) depth = cv2.resize(curr_depth, dsize=( config['data']['desired_image_width'], config['data']['desired_image_height']), interpolation=cv2.INTER_NEAREST) depth = np.expand_dims(depth, -1) color = torch.from_numpy(color).cuda().float() color = color.permute(2, 0, 1) / 255 depth = torch.from_numpy(depth).cuda().float() depth = depth.permute(2, 0, 1) if time_idx == 0: intrinsics = torch.tensor([[sample.fl_x, 0, sample.cx], [0, sample.fl_y, sample.cy], [0, 0, 1]]).cuda().float() intrinsics = intrinsics / config['data']['downscale_factor'] intrinsics[2, 2] = 1.0 first_frame_w2c = torch.eye(4).cuda().float() cam = setup_camera(color.shape[2], color.shape[1], intrinsics.cpu().numpy(), first_frame_w2c.cpu().numpy()) # Initialize Densification Resolution Data densify_color = cv2.resize(image, dsize=( config['data']['densification_image_width'], config['data']['densification_image_height']), interpolation=cv2.INTER_LINEAR) densify_depth = cv2.resize(curr_depth, dsize=( config['data']['densification_image_width'], config['data']['densification_image_height']), interpolation=cv2.INTER_NEAREST) densify_depth = np.expand_dims(densify_depth, -1) densify_color = torch.from_numpy(densify_color).cuda().float() densify_color = densify_color.permute(2, 0, 1) / 255 densify_depth = torch.from_numpy(densify_depth).cuda().float() densify_depth = densify_depth.permute(2, 0, 1) if time_idx == 0: densify_intrinsics = torch.tensor([[sample.fl_x, 0, sample.cx], [0, sample.fl_y, sample.cy], [0, 0, 1]]).cuda().float() densify_intrinsics = densify_intrinsics / config['data']['densify_downscale_factor'] densify_intrinsics[2, 2] = 1.0 densify_cam = setup_camera(densify_color.shape[2], densify_color.shape[1], densify_intrinsics.cpu().numpy(), first_frame_w2c.cpu().numpy()) # Initialize Params for first time step if time_idx == 0: # Get Initial Point Cloud mask = (densify_depth > 0) # Mask out invalid depth values mask = mask.reshape(-1) init_pt_cld, mean3_sq_dist = get_pointcloud(densify_color, densify_depth, densify_intrinsics, first_frame_w2c, mask=mask, compute_mean_sq_dist=True, mean_sq_dist_method=config['mean_sq_dist_method']) params, variables = initialize_params(init_pt_cld, num_frames, mean3_sq_dist) variables['scene_radius'] = torch.max(densify_depth)/config['scene_radius_depth_ratio'] # Initialize Mapping & Tracking for current frame iter_time_idx = time_idx curr_gt_w2c = gt_w2c_all_frames curr_data = {'cam': cam, 'im': color, 'depth':depth, 'id': iter_time_idx, 'intrinsics': intrinsics, 'w2c': first_frame_w2c, 'iter_gt_w2c_list': curr_gt_w2c} 
tracking_curr_data = curr_data # Optimization Iterations num_iters_mapping = config['mapping']['num_iters'] # Initialize the camera pose for the current frame if time_idx > 0: params = initialize_camera_pose(params, time_idx, forward_prop=config['tracking']['forward_prop']) # Tracking tracking_start_time = time.time() if time_idx > 0 and not config['tracking']['use_gt_poses']: # Reset Optimizer & Learning Rates for tracking optimizer = initialize_optimizer(params, config['tracking']['lrs'], tracking=True) # Keep Track of Best Candidate Rotation & Translation candidate_cam_unnorm_rot = params['cam_unnorm_rots'][..., time_idx].detach().clone() candidate_cam_tran = params['cam_trans'][..., time_idx].detach().clone() current_min_loss = float(1e20) # Tracking Optimization iter = 0 do_continue_slam = False num_iters_tracking = config['tracking']['num_iters'] progress_bar = tqdm(range(num_iters_tracking), desc=f"Tracking Time Step: {time_idx}") while True: iter_start_time = time.time() # Loss for current frame
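One conversion in the full script above is easy to miss: each incoming ARKit pose X_WV is conjugated with P = diag(1, -1, -1, 1) (gt_pose = P @ gt_pose @ P.T), which negates the y and z axes, presumably to move from ARKit's camera convention to the one the SLAM pipeline expects; poses are then re-expressed relative to the first frame, so the first frame's ground-truth w2c comes out as the identity. A minimal sketch of the conjugation step alone (the identity pose here is a placeholder purely for illustration):

import torch

P = torch.tensor([[1., 0., 0., 0.],
                  [0., -1., 0., 0.],
                  [0., 0., -1., 0.],
                  [0., 0., 0., 1.]])
X_WV = torch.eye(4)                  # placeholder camera-to-world pose from ARKit
gt_pose = P @ X_WV @ P.T             # same pose with y and z axes negated
gt_w2c = torch.linalg.inv(gt_pose)   # world-to-camera, as appended to gt_w2c_all_frames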
loss, variables, losses = get_loss(params, tracking_curr_data, variables, iter_time_idx, config['tracking']['loss_weights'],
10
2023-11-30 20:26:47+00:00
16k
zhyever/PatchFusion
ControlNet/ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ControlNet/ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ControlNet/ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ControlNet/ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ControlNet/ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ControlNet/ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ControlNet/ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ControlNet/ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ControlNet/ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ControlNet/ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates\n else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def reset_num_updates(self):\n del self.num_updates\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 
1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ControlNet/ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ControlNet/ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "IdentityFirstStage", "path": "ControlNet/ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ControlNet/ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ema_decay=None,\n learn_logvar=False\n ):\n super().__init__()\n self.learn_logvar = learn_logvar\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n self.use_ema = ema_decay is not None\n if self.use_ema:\n self.ema_decay = ema_decay\n assert 0. 
< ema_decay < 1.\n self.model_ema = LitEma(self, decay=ema_decay)\n print(f\"Keeping EMAs of {len(list(self.model_ema.buffers()))}.\")\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n @contextmanager\n def ema_scope(self, context=None):\n if self.use_ema:\n self.model_ema.store(self.parameters())\n self.model_ema.copy_to(self)\n if context is not None:\n print(f\"{context}: Switched to EMA weights\")\n try:\n yield None\n finally:\n if self.use_ema:\n self.model_ema.restore(self.parameters())\n if context is not None:\n print(f\"{context}: Restored training weights\")\n\n def on_train_batch_end(self, *args, **kwargs):\n if self.use_ema:\n self.model_ema(self)\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n log_dict = self._validation_step(batch, batch_idx)\n with self.ema_scope():\n log_dict_ema = self._validation_step(batch, batch_idx, postfix=\"_ema\")\n return log_dict\n\n def _validation_step(self, batch, batch_idx, postfix=\"\"):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n self.log(f\"val{postfix}/rec_loss\", log_dict_ae[f\"val{postfix}/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def 
configure_optimizers(self):\n lr = self.learning_rate\n ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(\n self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())\n if self.learn_logvar:\n print(f\"{self.__class__.__name__}: Learning logvar\")\n ae_params_list.append(self.loss.logvar)\n opt_ae = torch.optim.Adam(ae_params_list,\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n if log_ema or self.use_ema:\n with self.ema_scope():\n xrec_ema, posterior_ema = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec_ema.shape[1] > 3\n xrec_ema = self.to_rgb(xrec_ema)\n log[\"samples_ema\"] = self.decode(torch.randn_like(posterior_ema.sample()))\n log[\"reconstructions_ema\"] = xrec_ema\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ControlNet/ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ControlNet/ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ControlNet/ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ControlNet/ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n 
super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise 
NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, 
device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec" } ]
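For reference, the p_sample_ddim snippet in the DDIMSampler entry above implements the standard eta-parameterized DDIM update. Written out (with \bar\alpha denoting the cumulative alphas the code stores as ddim_alphas and \sigma_t the eta-scaled noise level in ddim_sigmas), the step it performs in the eps-parameterization branch is

\hat{x}_0 = \frac{x_t - \sqrt{1-\bar\alpha_t}\,\epsilon_\theta(x_t, c)}{\sqrt{\bar\alpha_t}}, \qquad
x_{t-1} = \sqrt{\bar\alpha_{t-1}}\,\hat{x}_0 + \sqrt{1-\bar\alpha_{t-1}-\sigma_t^2}\;\epsilon_\theta(x_t, c) + \sigma_t z, \quad z \sim \mathcal{N}(0, I),

and when classifier-free guidance is active the predicted noise is first replaced by \epsilon_\theta(x_t,\varnothing) + s\,\bigl(\epsilon_\theta(x_t,c) - \epsilon_\theta(x_t,\varnothing)\bigr), matching model_uncond + unconditional_guidance_scale * (model_t - model_uncond) in the code.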
import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager, nullcontext from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from omegaconf import ListConfig from ControlNet.ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ControlNet.ldm.modules.ema import LitEma from ControlNet.ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from ControlNet.ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL from ControlNet.ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from ControlNet.ldm.models.diffusion.ddim import DDIMSampler
12,180
log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, force_null_conditioning=False, *args, **kwargs): self.force_null_conditioning = force_null_conditioning self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning: conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) reset_ema = kwargs.pop("reset_ema", False) reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True if reset_ema: assert self.use_ema print( f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config):
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) else: self.register_buffer('logvar', logvar) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_v(self, x, noise, t): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x ) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: 
x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, force_null_conditioning=False, *args, **kwargs): self.force_null_conditioning = force_null_conditioning self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if 
conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning: conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) reset_ema = kwargs.pop("reset_ema", False) reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True if reset_ema: assert self.use_ema print( f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config):
model = instantiate_from_config(config)
7
2023-12-04 08:43:15+00:00
16k
baaivision/GeoDream
extern/ldm_zero123/models/diffusion/ddpm.py
[ { "identifier": "AutoencoderKL", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(\n self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2 * ddconfig[\"z_channels\"], 2 * embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels) == int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n self.log(\n \"aeloss\",\n aeloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n\n self.log(\n \"discloss\",\n discloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n 0,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n 1,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n self.log(\"val/rec_loss\", 
log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(\n list(self.encoder.parameters())\n + list(self.decoder.parameters())\n + list(self.quant_conv.parameters())\n + list(self.post_quant_conv.parameters()),\n lr=lr,\n betas=(0.5, 0.9),\n )\n opt_disc = torch.optim.Adam(\n self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9)\n )\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0\n return x" }, { "identifier": "IdentityFirstStage", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "VQModelInterface", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "DDIMSampler", "path": "extern/ldm_zero123/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def to(self, device):\n \"\"\"Same as to in torch module\n Don't really underestand why this isn't a module in the first place\"\"\"\n for k, v in self.__dict__.items():\n if isinstance(v, torch.Tensor):\n new_v = getattr(self, k).to(device)\n setattr(self, k, new_v)\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(\n self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0.0, verbose=True\n ):\n self.ddim_timesteps = make_ddim_timesteps(\n ddim_discr_method=ddim_discretize,\n 
num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,\n verbose=verbose,\n )\n alphas_cumprod = self.model.alphas_cumprod\n assert (\n alphas_cumprod.shape[0] == self.ddpm_num_timesteps\n ), \"alphas have to be defined for each timestep\"\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer(\"betas\", to_torch(self.model.betas))\n self.register_buffer(\"alphas_cumprod\", to_torch(alphas_cumprod))\n self.register_buffer(\n \"alphas_cumprod_prev\", to_torch(self.model.alphas_cumprod_prev)\n )\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer(\n \"sqrt_alphas_cumprod\", to_torch(np.sqrt(alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_one_minus_alphas_cumprod\",\n to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())),\n )\n self.register_buffer(\n \"log_one_minus_alphas_cumprod\", to_torch(np.log(1.0 - alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recip_alphas_cumprod\", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recipm1_alphas_cumprod\",\n to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)),\n )\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(\n alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,\n verbose=verbose,\n )\n self.register_buffer(\"ddim_sigmas\", ddim_sigmas)\n self.register_buffer(\"ddim_alphas\", ddim_alphas)\n self.register_buffer(\"ddim_alphas_prev\", ddim_alphas_prev)\n self.register_buffer(\"ddim_sqrt_one_minus_alphas\", np.sqrt(1.0 - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev)\n / (1 - self.alphas_cumprod)\n * (1 - self.alphas_cumprod / self.alphas_cumprod_prev)\n )\n self.register_buffer(\n \"ddim_sigmas_for_original_num_steps\", sigmas_for_original_sampling_steps\n )\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.0,\n mask=None,\n x0=None,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n **kwargs,\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list):\n ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(\n f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\"\n )\n\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\"\n )\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(\n conditioning,\n size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask,\n x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(\n self,\n cond,\n shape,\n x_T=None,\n ddim_use_original_steps=False,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n log_every_t=100,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n t_start=-1,\n ):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = (\n self.ddpm_num_timesteps\n if ddim_use_original_steps\n else self.ddim_timesteps\n )\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = (\n int(\n min(timesteps / self.ddim_timesteps.shape[0], 1)\n * self.ddim_timesteps.shape[0]\n )\n - 1\n )\n timesteps = self.ddim_timesteps[:subset_end]\n\n timesteps = timesteps[:t_start]\n\n intermediates = {\"x_inter\": [img], \"pred_x0\": [img]}\n time_range = (\n reversed(range(0, timesteps))\n if ddim_use_original_steps\n else np.flip(timesteps)\n )\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"DDIM Sampler\", total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(\n x0, ts\n ) # TODO: deterministic forward pass?\n img = img_orig * mask + (1.0 - mask) * img\n\n outs = self.p_sample_ddim(\n img,\n cond,\n ts,\n index=index,\n use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised,\n temperature=temperature,\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n img, pred_x0 = outs\n if callback:\n img = callback(i, img, pred_x0)\n if img_callback:\n img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index 
== total_steps - 1:\n intermediates[\"x_inter\"].append(img)\n intermediates[\"pred_x0\"].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(\n self,\n x,\n c,\n t,\n index,\n repeat_noise=False,\n use_original_steps=False,\n quantize_denoised=False,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n ):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.0:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [\n torch.cat([unconditional_conditioning[k][i], c[k][i]])\n for i in range(len(c[k]))\n ]\n else:\n c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(\n self.model, e_t, x, t, c, **corrector_kwargs\n )\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = (\n self.model.alphas_cumprod_prev\n if use_original_steps\n else self.ddim_alphas_prev\n )\n sqrt_one_minus_alphas = (\n self.model.sqrt_one_minus_alphas_cumprod\n if use_original_steps\n else self.ddim_sqrt_one_minus_alphas\n )\n sigmas = (\n self.model.ddim_sigmas_for_original_num_steps\n if use_original_steps\n else self.ddim_sigmas\n )\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full(\n (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device\n )\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n\n print(t, sqrt_one_minus_at, a_t)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)\n\n # direction pointing to x_t\n dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.0:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(\n self,\n x0,\n c,\n t_enc,\n use_original_steps=False,\n return_intermediates=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n ):\n num_reference_steps = (\n self.ddpm_num_timesteps\n if use_original_steps\n else self.ddim_timesteps.shape[0]\n )\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc=\"Encoding 
Image\"):\n t = torch.full(\n (x0.shape[0],), i, device=self.model.device, dtype=torch.long\n )\n if unconditional_guidance_scale == 1.0:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(\n torch.cat((x_next, x_next)),\n torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c)),\n ),\n 2,\n )\n noise_pred = e_t_uncond + unconditional_guidance_scale * (\n noise_pred - e_t_uncond\n )\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = (\n alphas_next[i].sqrt()\n * ((1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt())\n * noise_pred\n )\n x_next = xt_weighted + weighted_noise_pred\n if (\n return_intermediates\n and i % (num_steps // return_intermediates) == 0\n and i < num_steps - 1\n ):\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n\n out = {\"x_encoded\": x_next, \"intermediate_steps\": inter_steps}\n if return_intermediates:\n out.update({\"intermediates\": intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (\n extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0\n + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise\n )\n\n @torch.no_grad()\n def decode(\n self,\n x_latent,\n cond,\n t_start,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n use_original_steps=False,\n ):\n timesteps = (\n np.arange(self.ddpm_num_timesteps)\n if use_original_steps\n else self.ddim_timesteps\n )\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"Decoding image\", total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full(\n (x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long\n )\n x_dec, _ = self.p_sample_ddim(\n x_dec,\n cond,\n ts,\n index=index,\n use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return x_dec" }, { "identifier": "CrossAttention", "path": "extern/ldm_zero123/modules/attention.py", "snippet": "class CrossAttention(nn.Module):\n def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):\n super().__init__()\n inner_dim = dim_head * heads\n context_dim = default(context_dim, query_dim)\n\n self.scale = dim_head**-0.5\n self.heads = heads\n\n self.to_q = nn.Linear(query_dim, inner_dim, bias=False)\n self.to_k = nn.Linear(context_dim, inner_dim, bias=False)\n self.to_v = nn.Linear(context_dim, inner_dim, bias=False)\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)\n )\n\n def forward(self, x, context=None, mask=None):\n h = 
self.heads\n\n q = self.to_q(x)\n context = default(context, x)\n k = self.to_k(context)\n v = self.to_v(context)\n\n q, k, v = map(lambda t: rearrange(t, \"b n (h d) -> (b h) n d\", h=h), (q, k, v))\n\n sim = einsum(\"b i d, b j d -> b i j\", q, k) * self.scale\n\n if exists(mask):\n mask = rearrange(mask, \"b ... -> b (...)\")\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = repeat(mask, \"b j -> (b h) () j\", h=h)\n sim.masked_fill_(~mask, max_neg_value)\n\n # attention, what we cannot get enough of\n attn = sim.softmax(dim=-1)\n\n out = einsum(\"b i j, b j d -> b i d\", attn, v)\n out = rearrange(out, \"(b h) n d -> b n (h d)\", h=h)\n return self.to_out(out)" }, { "identifier": "extract_into_tensor", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "make_beta_schedule", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(\n schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3\n):\n if schedule == \"linear\":\n betas = (\n torch.linspace(\n linear_start**0.5, linear_end**0.5, n_timestep, dtype=torch.float64\n )\n ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(\n linear_start, linear_end, n_timestep, dtype=torch.float64\n )\n elif schedule == \"sqrt\":\n betas = (\n torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n ** 0.5\n )\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "noise_like", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(\n shape[0], *((1,) * (len(shape) - 1))\n )\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DiagonalGaussianDistribution", "path": "extern/ldm_zero123/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(\n device=self.parameters.device\n )\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(\n device=self.parameters.device\n )\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.0])\n else:\n if other is None:\n return 0.5 * torch.sum(\n torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3],\n )\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var\n - 1.0\n - self.logvar\n + other.logvar,\n dim=[1, 2, 3],\n )\n\n def nll(self, sample, dims=[1, 2, 3]):\n if self.deterministic:\n return 
torch.Tensor([0.0])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims,\n )\n\n def mode(self):\n return self.mean" }, { "identifier": "normal_kl", "path": "extern/ldm_zero123/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "LitEma", "path": "extern/ldm_zero123/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError(\"Decay must be between 0 and 1\")\n\n self.m_name2s_name = {}\n self.register_buffer(\"decay\", torch.tensor(decay, dtype=torch.float32))\n self.register_buffer(\n \"num_updates\",\n torch.tensor(0, dtype=torch.int)\n if use_num_upates\n else torch.tensor(-1, dtype=torch.int),\n )\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace(\".\", \"\")\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(\n one_minus_decay * (shadow_params[sname] - m_param[key])\n )\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. 
After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "count_params", "path": "extern/ldm_zero123/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "default", "path": "extern/ldm_zero123/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "exists", "path": "extern/ldm_zero123/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "instantiate_from_config", "path": "extern/ldm_zero123/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == \"__is_first_stage__\":\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "isimage", "path": "extern/ldm_zero123/util.py", "snippet": "def isimage(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "ismap", "path": "extern/ldm_zero123/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "log_txt_as_img", "path": "extern/ldm_zero123/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype(\"data/DejaVuSans.ttf\", size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(\n xc[bi][start : start + nc] for start in range(0, len(xc[bi]), nc)\n )\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "mean_flat", "path": "extern/ldm_zero123/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" } ]
import itertools

import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
from contextlib import contextmanager, nullcontext
from functools import partial

from einops import rearrange, repeat
from omegaconf import ListConfig
from pytorch_lightning.utilities.rank_zero import rank_zero_only
from torch.optim.lr_scheduler import LambdaLR
from torchvision.utils import make_grid
from tqdm import tqdm

from extern.ldm_zero123.models.autoencoder import (
    AutoencoderKL,
    IdentityFirstStage,
    VQModelInterface,
)
from extern.ldm_zero123.models.diffusion.ddim import DDIMSampler
from extern.ldm_zero123.modules.attention import CrossAttention
from extern.ldm_zero123.modules.diffusionmodules.util import (
    extract_into_tensor,
    make_beta_schedule,
    noise_like,
)
from extern.ldm_zero123.modules.distributions.distributions import (
    DiagonalGaussianDistribution,
    normal_kl,
)
from extern.ldm_zero123.modules.ema import LitEma
from extern.ldm_zero123.util import (
    count_params,
    default,
    exists,
    instantiate_from_config,
    isimage,
    ismap,
    log_txt_as_img,
    mean_flat,
)
12692
padding=0, stride=(stride[0] // df, stride[1] // df), ) fold = torch.nn.Fold( output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h // df, w // df ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx) ) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input( self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, uncond=0.05, ): x = super().get_input(batch, k) T = batch["T"].to(memory_format=torch.contiguous_format).float() if bs is not None: x = x[:bs] T = T[:bs].to(self.device) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() cond_key = cond_key or self.cond_stage_key xc = super().get_input(batch, cond_key).to(self.device) if bs is not None: xc = xc[:bs] cond = {} # To support classifier-free guidance, randomly drop out only text conditioning 5%, only image conditioning 5%, and both 5%. random = torch.rand(x.size(0), device=x.device) prompt_mask = rearrange(random < 2 * uncond, "n -> n 1 1") input_mask = 1 - rearrange( (random >= uncond).float() * (random < 3 * uncond).float(), "n -> n 1 1 1" ) null_prompt = self.get_learned_conditioning([""]) # z.shape: [8, 4, 64, 64]; c.shape: [8, 1, 768] # print('=========== xc shape ===========', xc.shape) with torch.enable_grad(): clip_emb = self.get_learned_conditioning(xc).detach() null_prompt = self.get_learned_conditioning([""]).detach() cond["c_crossattn"] = [ self.cc_projection( torch.cat( [ torch.where(prompt_mask, null_prompt, clip_emb), T[:, None, :], ], dim=-1, ) ) ] cond["c_concat"] = [ input_mask * self.encode_first_stage((xc.to(self.device))).mode().detach() ] out = [z, cond] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out # @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, "b h w c -> b c h w").contiguous() z = 1.0 / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold( z, ks, stride, uf=uf ) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0.0, v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1.0, conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0.0, make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in [ "eps", "x0", ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet ) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule( beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) alphas = 1.0 - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) (timesteps,) = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert ( alphas_cumprod.shape[0] == self.num_timesteps ), "alphas have to be defined for each timestep" to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer("betas", to_torch(betas)) self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod))) self.register_buffer( "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod)) ) self.register_buffer( "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod)) ) self.register_buffer( "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod)) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1)) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * ( 1.0 - alphas_cumprod_prev ) / (1.0 - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer("posterior_variance", to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", to_torch(np.log(np.maximum(posterior_variance, 1e-20))), ) self.register_buffer( "posterior_mean_coef1", to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)), ) self.register_buffer( "posterior_mean_coef2", to_torch( (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) ), ) if self.parameterization == "eps": lvlb_weights = self.betas**2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod) ) elif self.parameterization == "x0": lvlb_weights = ( 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2.0 * 1 - torch.Tensor(alphas_cumprod)) ) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer("lvlb_weights", lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len( [ name for name, _ in itertools.chain( self.named_parameters(), self.named_buffers() ) ] ) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params, ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[ i % old_shape[0], j % old_shape[1] ] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = ( self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False) ) print( f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. 
:return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor( self.log_one_minus_alphas_cumprod, t, x_start.shape ) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape ) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1.0, 1.0) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance( x=x, t=t, clip_denoised=clip_denoised ) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm( reversed(range(0, self.num_timesteps)), desc="Sampling t", total=self.num_timesteps, ): img = self.p_sample( img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised, ) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop( (batch_size, channels, image_size, image_size), return_intermediates=return_intermediates, ) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise ) def get_loss(self, pred, target, mean=True): if self.loss_type == "l1": loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == "l2": if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction="none") else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = 
self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError( f"Paramterization {self.parameterization} not yet supported" ) loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = "train" if self.training else "val" loss_dict.update({f"{log_prefix}/loss_simple": loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f"{log_prefix}/loss_vlb": loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f"{log_prefix}/loss": loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint( 0, self.num_timesteps, (x.shape[0],), device=self.device ).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, "b h w c -> b c h w") x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict( loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True ) self.log( "global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False, ) if self.use_scheduler: lr = self.optimizers().param_groups[0]["lr"] self.log( "lr_abs", lr, prog_bar=True, logger=True, on_step=True, on_epoch=False ) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + "_ema": loss_dict_ema[key] for key in loss_dict_ema} self.log_dict( loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) self.log_dict( loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), "1 -> b", b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = 
self.sample( batch_size=N, return_intermediates=True ) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__( self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, unet_trainable=True, *args, **kwargs, ): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs["timesteps"] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = "concat" if concat_mode else "crossattn" if cond_stage_config == "__is_unconditional__": conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.unet_trainable = unet_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer("scale_factor", torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward # construct linear projection layer for concatenating image CLIP embedding and RT self.cc_projection = nn.Linear(772, 768) nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) nn.init.zeros_(list(self.cc_projection.parameters())[1]) self.cc_projection.requires_grad_(True) self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule( self, ): self.cond_ids = torch.full( size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long, ) ids = torch.round( torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) ).long() self.cond_ids[: self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if ( self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt ): assert ( self.scale_factor == 1.0 ), "rather not use custom rescaling and std-rescaling simultaneously" # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer("scale_factor", 1.0 / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, 
linear_end=2e-2, cosine_s=8e-3, ): super().register_schedule( given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s ) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != "__is_first_stage__" assert config != "__is_unconditional__" model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list( self, samples, desc="", force_no_decoder_quantization=False ): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append( self.decode_first_stage( zd.to(self.device), force_not_quantize=force_no_decoder_quantization ) ) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError( f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented" ) return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, "encode") and callable( self.cond_stage_model.encode ): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min( torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1 )[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip( weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 
1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip( L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"], ) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold( self, x, kernel_size, stride, uf=1, df=1 ): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting( kernel_size[0], kernel_size[1], Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf), ) fold = torch.nn.Fold( output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h * uf, w * uf ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx) ) elif df > 1 and uf == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df), ) fold = torch.nn.Fold( output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h // df, w // df ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx) ) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input( self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, uncond=0.05, ): x = super().get_input(batch, k) T = batch["T"].to(memory_format=torch.contiguous_format).float() if bs is not None: x = x[:bs] T = T[:bs].to(self.device) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() cond_key = cond_key or self.cond_stage_key xc = super().get_input(batch, cond_key).to(self.device) if bs is not None: xc = xc[:bs] cond = {} # To support classifier-free guidance, randomly drop out only text conditioning 5%, only image conditioning 5%, and both 5%. 
random = torch.rand(x.size(0), device=x.device) prompt_mask = rearrange(random < 2 * uncond, "n -> n 1 1") input_mask = 1 - rearrange( (random >= uncond).float() * (random < 3 * uncond).float(), "n -> n 1 1 1" ) null_prompt = self.get_learned_conditioning([""]) # z.shape: [8, 4, 64, 64]; c.shape: [8, 1, 768] # print('=========== xc shape ===========', xc.shape) with torch.enable_grad(): clip_emb = self.get_learned_conditioning(xc).detach() null_prompt = self.get_learned_conditioning([""]).detach() cond["c_crossattn"] = [ self.cc_projection( torch.cat( [ torch.where(prompt_mask, null_prompt, clip_emb), T[:, None, :], ], dim=-1, ) ) ] cond["c_concat"] = [ input_mask * self.encode_first_stage((xc.to(self.device))).mode().detach() ] out = [z, cond] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out # @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, "b h w c -> b c h w").contiguous() z = 1.0 / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold( z, ks, stride, uf=uf ) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim
if isinstance(self.first_stage_model, VQModelInterface):
2
2023-12-01 01:59:42+00:00
16k
lucidrains/meshgpt-pytorch
meshgpt_pytorch/trainer.py
[ { "identifier": "custom_collate", "path": "meshgpt_pytorch/data.py", "snippet": "def custom_collate(data, pad_id = -1):\n is_dict = isinstance(first(data), dict)\n\n if is_dict:\n keys = first(data).keys()\n data = [d.values() for d in data]\n\n output = []\n\n for datum in zip(*data):\n if is_tensor(first(datum)):\n datum = pad_sequence(datum, batch_first = True, padding_value = pad_id)\n else:\n datum = list(datum)\n\n output.append(datum)\n\n output = tuple(output)\n\n if is_dict:\n output = dict(zip(keys, output))\n\n return output" }, { "identifier": "__version__", "path": "meshgpt_pytorch/version.py", "snippet": "" }, { "identifier": "MeshAutoencoder", "path": "meshgpt_pytorch/meshgpt_pytorch.py", "snippet": "class MeshAutoencoder(Module):\n @beartype\n def __init__(\n self,\n num_discrete_coors = 128,\n coor_continuous_range: Tuple[float, float] = (-1., 1.),\n dim_coor_embed = 64,\n num_discrete_area = 128,\n dim_area_embed = 16,\n num_discrete_normals = 128,\n dim_normal_embed = 64,\n num_discrete_angle = 128,\n dim_angle_embed = 16,\n encoder_dims_through_depth: Tuple[int, ...] = (\n 64, 128, 256, 256, 576\n ),\n init_decoder_conv_kernel = 7,\n decoder_dims_through_depth: Tuple[int, ...] = (\n 128, 128, 128, 128,\n 192, 192, 192, 192,\n 256, 256, 256, 256, 256, 256,\n 384, 384, 384\n ),\n dim_codebook = 192,\n num_quantizers = 2, # or 'D' in the paper\n codebook_size = 16384, # they use 16k, shared codebook between layers\n use_residual_lfq = True, # whether to use the latest lookup-free quantization\n rq_kwargs: dict = dict(\n quantize_dropout = True,\n quantize_dropout_cutoff_index = 1,\n quantize_dropout_multiple_of = 1,\n ),\n rvq_kwargs: dict = dict(\n kmeans_init = True,\n threshold_ema_dead_code = 2,\n ),\n rlfq_kwargs: dict = dict(\n frac_per_sample_entropy = 1.\n ),\n rvq_stochastic_sample_codes = True,\n sageconv_kwargs: dict = dict(\n normalize = True,\n project = True\n ),\n commit_loss_weight = 0.1,\n bin_smooth_blur_sigma = 0.4, # they blur the one hot discretized coordinate positions\n attn_encoder_depth = 0,\n attn_decoder_depth = 0,\n local_attn_kwargs: dict = dict(\n dim_head = 32,\n heads = 8\n ),\n local_attn_window_size = 64,\n linear_attn_kwargs: dict = dict(\n dim_head = 8,\n heads = 16\n ),\n use_linear_attn = True,\n pad_id = -1,\n flash_attn = True,\n sageconv_dropout = 0.,\n attn_dropout = 0.,\n ff_dropout = 0.,\n resnet_dropout = 0,\n checkpoint_quantizer = False\n ):\n super().__init__()\n\n # main face coordinate embedding\n\n self.num_discrete_coors = num_discrete_coors\n self.coor_continuous_range = coor_continuous_range\n\n self.discretize_face_coords = partial(discretize, num_discrete = num_discrete_coors, continuous_range = coor_continuous_range)\n self.coor_embed = nn.Embedding(num_discrete_coors, dim_coor_embed)\n\n # derived feature embedding\n\n self.discretize_angle = partial(discretize, num_discrete = num_discrete_angle, continuous_range = (0., pi))\n self.angle_embed = nn.Embedding(num_discrete_angle, dim_angle_embed)\n\n lo, hi = coor_continuous_range\n self.discretize_area = partial(discretize, num_discrete = num_discrete_area, continuous_range = (0., (hi - lo) ** 2))\n self.area_embed = nn.Embedding(num_discrete_area, dim_area_embed)\n\n self.discretize_normals = partial(discretize, num_discrete = num_discrete_normals, continuous_range = coor_continuous_range)\n self.normal_embed = nn.Embedding(num_discrete_normals, dim_normal_embed)\n\n # attention related\n\n attn_kwargs = dict(\n causal = False,\n prenorm = True,\n dropout = 
attn_dropout,\n window_size = local_attn_window_size,\n )\n\n # initial dimension\n\n init_dim = dim_coor_embed * 9 + dim_angle_embed * 3 + dim_normal_embed * 3 + dim_area_embed\n\n # project into model dimension\n\n self.project_in = nn.Linear(init_dim, dim_codebook)\n\n # initial sage conv\n\n sageconv_kwargs = {**sageconv_kwargs, 'sageconv_dropout' : sageconv_dropout}\n\n init_encoder_dim, *encoder_dims_through_depth = encoder_dims_through_depth\n curr_dim = init_encoder_dim\n\n self.init_sage_conv = SAGEConv(dim_codebook, init_encoder_dim, **sageconv_kwargs)\n\n self.init_encoder_act_and_norm = nn.Sequential(\n nn.SiLU(),\n nn.LayerNorm(init_encoder_dim)\n )\n\n self.encoders = ModuleList([])\n\n for dim_layer in encoder_dims_through_depth:\n sage_conv = SAGEConv(\n curr_dim,\n dim_layer,\n **sageconv_kwargs\n )\n\n self.encoders.append(sage_conv)\n curr_dim = dim_layer\n\n self.encoder_attn_blocks = ModuleList([])\n\n for _ in range(attn_encoder_depth):\n self.encoder_attn_blocks.append(nn.ModuleList([\n TaylorSeriesLinearAttn(curr_dim, prenorm = True, **linear_attn_kwargs) if use_linear_attn else None,\n LocalMHA(dim = curr_dim, **attn_kwargs, **local_attn_kwargs),\n nn.Sequential(RMSNorm(curr_dim), FeedForward(curr_dim, glu = True, dropout = ff_dropout))\n ]))\n\n # residual quantization\n\n self.codebook_size = codebook_size\n self.num_quantizers = num_quantizers\n\n self.project_dim_codebook = nn.Linear(curr_dim, dim_codebook * 3)\n\n if use_residual_lfq:\n self.quantizer = ResidualLFQ(\n dim = dim_codebook,\n num_quantizers = num_quantizers,\n codebook_size = codebook_size,\n commitment_loss_weight = 1.,\n **rlfq_kwargs,\n **rq_kwargs\n )\n else:\n self.quantizer = ResidualVQ(\n dim = dim_codebook,\n num_quantizers = num_quantizers,\n codebook_size = codebook_size,\n shared_codebook = True,\n commitment_weight = 1.,\n stochastic_sample_codes = rvq_stochastic_sample_codes,\n **rvq_kwargs,\n **rq_kwargs\n )\n\n self.checkpoint_quantizer = checkpoint_quantizer # whether to memory checkpoint the quantizer\n\n self.pad_id = pad_id # for variable lengthed faces, padding quantized ids will be set to this value\n\n # decoder\n\n decoder_input_dim = dim_codebook * 3\n\n self.decoder_attn_blocks = ModuleList([])\n\n for _ in range(attn_decoder_depth):\n self.decoder_attn_blocks.append(nn.ModuleList([\n TaylorSeriesLinearAttn(decoder_input_dim, prenorm = True, **linear_attn_kwargs) if use_linear_attn else None,\n LocalMHA(dim = decoder_input_dim, **attn_kwargs, **local_attn_kwargs),\n nn.Sequential(RMSNorm(decoder_input_dim), FeedForward(decoder_input_dim, glu = True, dropout = ff_dropout))\n ]))\n\n init_decoder_dim, *decoder_dims_through_depth = decoder_dims_through_depth\n curr_dim = init_decoder_dim\n\n assert is_odd(init_decoder_conv_kernel)\n\n self.init_decoder_conv = nn.Sequential(\n nn.Conv1d(dim_codebook * 3, init_decoder_dim, kernel_size = init_decoder_conv_kernel, padding = init_decoder_conv_kernel // 2),\n nn.SiLU(),\n Rearrange('b c n -> b n c'),\n nn.LayerNorm(init_decoder_dim),\n Rearrange('b n c -> b c n')\n )\n\n self.decoders = ModuleList([])\n\n for dim_layer in decoder_dims_through_depth:\n resnet_block = ResnetBlock(curr_dim, dim_layer, dropout = resnet_dropout)\n\n self.decoders.append(resnet_block)\n curr_dim = dim_layer\n\n self.to_coor_logits = nn.Sequential(\n nn.Linear(curr_dim, num_discrete_coors * 9),\n Rearrange('... (v c) -> ... 
v c', v = 9)\n )\n\n # loss related\n\n self.commit_loss_weight = commit_loss_weight\n self.bin_smooth_blur_sigma = bin_smooth_blur_sigma\n\n @beartype\n def encode(\n self,\n *,\n vertices: TensorType['b', 'nv', 3, float],\n faces: TensorType['b', 'nf', 3, int],\n face_edges: TensorType['b', 'e', 2, int],\n face_mask: TensorType['b', 'nf', bool],\n face_edges_mask: TensorType['b', 'e', bool],\n return_face_coordinates = False\n ):\n \"\"\"\n einops:\n b - batch\n nf - number of faces\n nv - number of vertices (3)\n c - coordinates (3)\n d - embed dim\n \"\"\"\n\n batch, num_vertices, num_coors, device = *vertices.shape, vertices.device\n _, num_faces, _ = faces.shape\n\n face_without_pad = faces.masked_fill(~rearrange(face_mask, 'b nf -> b nf 1'), 0)\n\n faces_vertices = repeat(face_without_pad, 'b nf nv -> b nf nv c', c = num_coors)\n vertices = repeat(vertices, 'b nv c -> b nf nv c', nf = num_faces)\n\n # continuous face coords\n\n face_coords = vertices.gather(-2, faces_vertices)\n\n # compute derived features and embed\n\n derived_features = get_derived_face_features(face_coords)\n\n discrete_angle = self.discretize_angle(derived_features['angles'])\n angle_embed = self.angle_embed(discrete_angle)\n\n discrete_area = self.discretize_area(derived_features['area'])\n area_embed = self.area_embed(discrete_area)\n\n discrete_normal = self.discretize_normals(derived_features['normals'])\n normal_embed = self.normal_embed(discrete_normal)\n\n # discretize vertices for face coordinate embedding\n\n discrete_face_coords = self.discretize_face_coords(face_coords)\n discrete_face_coords = rearrange(discrete_face_coords, 'b nf nv c -> b nf (nv c)') # 9 coordinates per face\n\n face_coor_embed = self.coor_embed(discrete_face_coords)\n face_coor_embed = rearrange(face_coor_embed, 'b nf c d -> b nf (c d)')\n\n # combine all features and project into model dimension\n\n face_embed, _ = pack([face_coor_embed, angle_embed, area_embed, normal_embed], 'b nf *')\n face_embed = self.project_in(face_embed)\n\n # handle variable lengths by using masked_select and masked_scatter\n\n # first handle edges\n # needs to be offset by number of faces for each batch\n\n face_index_offsets = reduce(face_mask.long(), 'b nf -> b', 'sum')\n face_index_offsets = F.pad(face_index_offsets.cumsum(dim = 0), (1, -1), value = 0)\n face_index_offsets = rearrange(face_index_offsets, 'b -> b 1 1')\n\n face_edges = face_edges + face_index_offsets\n face_edges = face_edges[face_edges_mask]\n face_edges = rearrange(face_edges, 'be ij -> ij be')\n\n # next prepare the face_mask for using masked_select and masked_scatter\n\n orig_face_embed_shape = face_embed.shape[:2]\n\n face_embed = face_embed[face_mask]\n\n # initial sage conv followed by activation and norm\n\n face_embed = self.init_sage_conv(face_embed, face_edges)\n\n face_embed = self.init_encoder_act_and_norm(face_embed)\n\n for conv in self.encoders:\n face_embed = conv(face_embed, face_edges)\n\n shape = (*orig_face_embed_shape, face_embed.shape[-1])\n\n face_embed = face_embed.new_zeros(shape).masked_scatter(rearrange(face_mask, '... -> ... 
1'), face_embed)\n\n for linear_attn, attn, ff in self.encoder_attn_blocks:\n if exists(linear_attn):\n face_embed = linear_attn(face_embed, mask = face_mask) + face_embed\n\n face_embed = attn(face_embed, mask = face_mask) + face_embed\n face_embed = ff(face_embed) + face_embed\n\n if not return_face_coordinates:\n return face_embed\n\n return face_embed, discrete_face_coords\n\n @beartype\n def quantize(\n self,\n *,\n faces: TensorType['b', 'nf', 3, int],\n face_mask: TensorType['b', 'n', bool],\n face_embed: TensorType['b', 'nf', 'd', float],\n pad_id = None,\n rvq_sample_codebook_temp = 1.\n ):\n pad_id = default(pad_id, self.pad_id)\n batch, num_faces, device = *faces.shape[:2], faces.device\n\n max_vertex_index = faces.amax()\n num_vertices = int(max_vertex_index.item() + 1)\n\n face_embed = self.project_dim_codebook(face_embed)\n face_embed = rearrange(face_embed, 'b nf (nv d) -> b nf nv d', nv = 3)\n\n vertex_dim = face_embed.shape[-1]\n vertices = torch.zeros((batch, num_vertices, vertex_dim), device = device)\n\n # create pad vertex, due to variable lengthed faces\n\n pad_vertex_id = num_vertices\n vertices = pad_at_dim(vertices, (0, 1), dim = -2, value = 0.)\n\n faces = faces.masked_fill(~rearrange(face_mask, 'b n -> b n 1'), pad_vertex_id)\n\n # prepare for scatter mean\n\n faces_with_dim = repeat(faces, 'b nf nv -> b (nf nv) d', d = vertex_dim)\n\n face_embed = rearrange(face_embed, 'b ... d -> b (...) d')\n\n # scatter mean\n\n averaged_vertices = scatter_mean(vertices, faces_with_dim, face_embed, dim = -2)\n\n # mask out null vertex token\n\n mask = torch.ones((batch, num_vertices + 1), device = device, dtype = torch.bool)\n mask[:, -1] = False\n\n # rvq specific kwargs\n\n quantize_kwargs = dict(mask = mask)\n\n if isinstance(self.quantizer, ResidualVQ):\n quantize_kwargs.update(sample_codebook_temp = rvq_sample_codebook_temp)\n\n # a quantize function that makes it memory checkpointable\n\n def quantize_wrapper_fn(inp):\n unquantized, quantize_kwargs = inp\n return self.quantizer(unquantized, **quantize_kwargs)\n\n # maybe checkpoint the quantize fn\n\n if self.checkpoint_quantizer:\n quantize_wrapper_fn = partial(checkpoint, quantize_wrapper_fn, use_reentrant = False)\n\n # residual VQ\n\n quantized, codes, commit_loss = quantize_wrapper_fn((averaged_vertices, quantize_kwargs))\n\n # gather quantized vertexes back to faces for decoding\n # now the faces have quantized vertices\n\n face_embed_output = quantized.gather(-2, faces_with_dim)\n face_embed_output = rearrange(face_embed_output, 'b (nf nv) d -> b nf (nv d)', nv = 3)\n\n # vertex codes also need to be gathered to be organized by face sequence\n # for autoregressive learning\n\n faces_with_quantized_dim = repeat(faces, 'b nf nv -> b (nf nv) q', q = self.num_quantizers)\n codes_output = codes.gather(-2, faces_with_quantized_dim)\n\n # make sure codes being outputted have this padding\n\n face_mask = repeat(face_mask, 'b nf -> b (nf nv) 1', nv = 3)\n codes_output = codes_output.masked_fill(~face_mask, self.pad_id)\n\n # output quantized, codes, as well as commitment loss\n\n return face_embed_output, codes_output, commit_loss\n\n @beartype\n def decode(\n self,\n quantized: TensorType['b', 'n', 'd', float],\n face_mask: TensorType['b', 'n', bool]\n ):\n conv_face_mask = rearrange(face_mask, 'b n -> b 1 n')\n\n x = quantized\n\n for linear_attn, attn, ff in self.decoder_attn_blocks:\n if exists(linear_attn):\n x = linear_attn(x, mask = face_mask) + x\n\n x = attn(x, mask = face_mask) + x\n x = ff(x) + x\n\n x = 
rearrange(x, 'b n d -> b d n')\n\n x = x.masked_fill(~conv_face_mask, 0.)\n x = self.init_decoder_conv(x)\n\n for resnet_block in self.decoders:\n x = resnet_block(x, mask = conv_face_mask)\n\n return rearrange(x, 'b d n -> b n d')\n\n @beartype\n @torch.no_grad()\n def decode_from_codes_to_faces(\n self,\n codes: Tensor,\n face_mask: Optional[TensorType['b', 'n', bool]] = None,\n return_discrete_codes = False\n ):\n codes = rearrange(codes, 'b ... -> b (...)')\n\n if not exists(face_mask):\n face_mask = reduce(codes != self.pad_id, 'b (nf nv q) -> b nf', 'all', nv = 3, q = self.num_quantizers)\n\n # handle different code shapes\n\n codes = rearrange(codes, 'b (n q) -> b n q', q = self.num_quantizers)\n\n # decode\n\n quantized = self.quantizer.get_output_from_indices(codes)\n quantized = rearrange(quantized, 'b (nf nv) d -> b nf (nv d)', nv = 3)\n\n decoded = self.decode(\n quantized,\n face_mask = face_mask\n )\n\n decoded = decoded.masked_fill(~face_mask[..., None], 0.)\n pred_face_coords = self.to_coor_logits(decoded)\n\n pred_face_coords = pred_face_coords.argmax(dim = -1)\n\n pred_face_coords = rearrange(pred_face_coords, '... (v c) -> ... v c', v = 3)\n\n # back to continuous space\n\n continuous_coors = undiscretize(\n pred_face_coords,\n num_discrete = self.num_discrete_coors,\n continuous_range = self.coor_continuous_range\n )\n\n # mask out with nan\n\n continuous_coors = continuous_coors.masked_fill(~rearrange(face_mask, 'b nf -> b nf 1 1'), float('nan'))\n\n if not return_discrete_codes:\n return continuous_coors, face_mask\n\n return continuous_coors, pred_face_coords, face_mask\n\n @torch.no_grad()\n def tokenize(self, vertices, faces, face_edges = None, **kwargs):\n assert 'return_codes' not in kwargs\n\n inputs = [vertices, faces, face_edges]\n inputs = [*filter(exists, inputs)]\n ndims = {i.ndim for i in inputs}\n\n assert len(ndims) == 1\n batch_less = first(list(ndims)) == 2\n\n if batch_less:\n inputs = [rearrange(i, '... -> 1 ...') for i in inputs]\n\n input_kwargs = dict(zip(['vertices', 'faces', 'face_edges'], inputs))\n\n self.eval()\n\n codes = self.forward(\n **input_kwargs,\n return_codes = True,\n **kwargs\n )\n\n if batch_less:\n codes = rearrange(codes, '1 ... 
-> ...')\n\n return codes\n\n @beartype\n def forward(\n self,\n *,\n vertices: TensorType['b', 'nv', 3, float],\n faces: TensorType['b', 'nf', 3, int],\n face_edges: Optional[TensorType['b', 'e', 2, int]] = None,\n return_codes = False,\n return_loss_breakdown = False,\n return_recon_faces = False,\n only_return_recon_faces = False,\n rvq_sample_codebook_temp = 1.\n ):\n if not exists(face_edges):\n face_edges = derive_face_edges_from_faces(faces, pad_id = self.pad_id)\n\n num_faces, num_face_edges, device = faces.shape[1], face_edges.shape[1], faces.device\n\n face_mask = reduce(faces != self.pad_id, 'b nf c -> b nf', 'all')\n face_edges_mask = reduce(face_edges != self.pad_id, 'b e ij -> b e', 'all')\n\n encoded, face_coordinates = self.encode(\n vertices = vertices,\n faces = faces,\n face_edges = face_edges,\n face_edges_mask = face_edges_mask,\n face_mask = face_mask,\n return_face_coordinates = True\n )\n\n quantized, codes, commit_loss = self.quantize(\n face_embed = encoded,\n faces = faces,\n face_mask = face_mask,\n rvq_sample_codebook_temp = rvq_sample_codebook_temp\n )\n\n if return_codes:\n assert not return_recon_faces, 'cannot return reconstructed faces when just returning raw codes'\n\n codes = codes.masked_fill(~repeat(face_mask, 'b nf -> b (nf 3) 1'), self.pad_id)\n return codes\n\n decode = self.decode(\n quantized,\n face_mask = face_mask\n )\n\n pred_face_coords = self.to_coor_logits(decode)\n\n # compute reconstructed faces if needed\n\n if return_recon_faces or only_return_recon_faces:\n\n recon_faces = undiscretize(\n pred_face_coords.argmax(dim = -1),\n num_discrete = self.num_discrete_coors,\n continuous_range = self.coor_continuous_range,\n )\n\n recon_faces = rearrange(recon_faces, 'b nf (nv c) -> b nf nv c', nv = 3)\n face_mask = rearrange(face_mask, 'b nf -> b nf 1 1')\n recon_faces = recon_faces.masked_fill(~face_mask, float('nan'))\n face_mask = rearrange(face_mask, 'b nf 1 1 -> b nf')\n\n if only_return_recon_faces:\n return recon_faces\n\n # prepare for recon loss\n\n pred_face_coords = rearrange(pred_face_coords, 'b ... c -> b c (...)')\n face_coordinates = rearrange(face_coordinates, 'b ... 
-> b 1 (...)')\n\n # reconstruction loss on discretized coordinates on each face\n # they also smooth (blur) the one hot positions, localized label smoothing basically\n\n with autocast(enabled = False):\n pred_log_prob = pred_face_coords.log_softmax(dim = 1)\n\n target_one_hot = torch.zeros_like(pred_log_prob).scatter(1, face_coordinates, 1.)\n\n if self.bin_smooth_blur_sigma >= 0.:\n target_one_hot = gaussian_blur_1d(target_one_hot, sigma = self.bin_smooth_blur_sigma)\n\n # cross entropy with localized smoothing\n\n recon_losses = (-target_one_hot * pred_log_prob).sum(dim = 1)\n\n face_mask = repeat(face_mask, 'b nf -> b (nf r)', r = 9)\n recon_loss = recon_losses[face_mask].mean()\n\n # calculate total loss\n\n total_loss = recon_loss + \\\n commit_loss.sum() * self.commit_loss_weight\n\n # calculate loss breakdown if needed\n\n loss_breakdown = (recon_loss, commit_loss)\n\n # some return logic\n\n if not return_loss_breakdown:\n if not return_recon_faces:\n return total_loss\n\n return recon_faces, total_loss\n\n if not return_recon_faces:\n return total_loss, loss_breakdown\n\n return recon_faces, total_loss, loss_breakdown" }, { "identifier": "MeshTransformer", "path": "meshgpt_pytorch/meshgpt_pytorch.py", "snippet": "class MeshTransformer(Module):\n @beartype\n def __init__(\n self,\n autoencoder: MeshAutoencoder,\n *,\n dim: Union[int, Tuple[int, int]] = 512,\n max_seq_len = 8192,\n flash_attn = True,\n attn_depth = 12,\n attn_dim_head = 64,\n attn_heads = 16,\n attn_kwargs: dict = dict(\n ff_glu = True,\n num_mem_kv = 4\n ),\n dropout = 0.,\n coarse_pre_gateloop_depth = 2,\n fine_pre_gateloop_depth = 2,\n gateloop_use_heinsen = False,\n fine_attn_depth = 2,\n fine_attn_dim_head = 32,\n fine_attn_heads = 8,\n pad_id = -1,\n condition_on_text = False,\n text_condition_model_types = ('t5',),\n text_condition_cond_drop_prob = 0.25\n ):\n super().__init__()\n\n dim, dim_fine = (dim, dim) if isinstance(dim, int) else dim\n\n self.autoencoder = autoencoder\n set_module_requires_grad_(autoencoder, False)\n\n self.codebook_size = autoencoder.codebook_size\n self.num_quantizers = autoencoder.num_quantizers\n\n self.sos_token = nn.Parameter(torch.randn(dim_fine))\n self.eos_token_id = self.codebook_size\n\n # they use axial positional embeddings\n\n assert divisible_by(max_seq_len, 3 * self.num_quantizers), f'max_seq_len ({max_seq_len}) must be divisible by (3 x {self.num_quantizers}) = {3 * self.num_quantizers}' # 3 vertices per face, with D codes per vertex\n\n self.token_embed = nn.Embedding(self.codebook_size + 1, dim)\n\n self.quantize_level_embed = nn.Parameter(torch.randn(self.num_quantizers, dim))\n self.vertex_embed = nn.Parameter(torch.randn(3, dim))\n\n self.abs_pos_emb = nn.Embedding(max_seq_len, dim)\n\n self.max_seq_len = max_seq_len\n\n # text condition\n\n self.condition_on_text = condition_on_text\n self.conditioner = None\n\n cross_attn_dim_context = None\n\n if condition_on_text:\n self.conditioner = TextEmbeddingReturner(\n model_types = text_condition_model_types,\n cond_drop_prob = text_condition_cond_drop_prob\n )\n cross_attn_dim_context = self.conditioner.dim_latent\n\n # for summarizing the vertices of each face\n\n self.to_face_tokens = nn.Sequential(\n nn.Linear(self.num_quantizers * 3 * dim, dim),\n nn.LayerNorm(dim)\n )\n\n self.coarse_gateloop_block = GateLoopBlock(dim, depth = coarse_pre_gateloop_depth, use_heinsen = gateloop_use_heinsen) if coarse_pre_gateloop_depth > 0 else None\n\n # main autoregressive attention network\n # attending to a face token\n\n 
self.decoder = Decoder(\n dim = dim,\n depth = attn_depth,\n dim_head = attn_dim_head,\n heads = attn_heads,\n attn_flash = flash_attn,\n attn_dropout = dropout,\n ff_dropout = dropout,\n cross_attend = condition_on_text,\n cross_attn_dim_context = cross_attn_dim_context,\n **attn_kwargs\n )\n\n # projection from coarse to fine, if needed\n\n self.maybe_project_coarse_to_fine = nn.Linear(dim, dim_fine) if dim != dim_fine else nn.Identity()\n\n # address a weakness in attention\n\n self.fine_gateloop_block = GateLoopBlock(dim, depth = fine_pre_gateloop_depth) if fine_pre_gateloop_depth > 0 else None\n\n # decoding the vertices, 2-stage hierarchy\n\n self.fine_decoder = Decoder(\n dim = dim_fine,\n depth = fine_attn_depth,\n dim_head = attn_dim_head,\n heads = attn_heads,\n attn_flash = flash_attn,\n attn_dropout = dropout,\n ff_dropout = dropout,\n **attn_kwargs\n )\n\n # to logits\n\n self.to_logits = nn.Linear(dim_fine, self.codebook_size + 1)\n\n # padding id\n # force the autoencoder to use the same pad_id given in transformer\n\n self.pad_id = pad_id\n autoencoder.pad_id = pad_id\n\n @property\n def device(self):\n return next(self.parameters()).device\n\n @beartype\n @torch.no_grad()\n def embed_texts(self, texts: Union[str, List[str]]):\n single_text = not isinstance(texts, list)\n if single_text:\n texts = [texts]\n\n assert exists(self.conditioner)\n text_embeds = self.conditioner.embed_texts(texts).detach()\n\n if single_text:\n text_embeds = text_embeds[0]\n\n return text_embeds\n\n @eval_decorator\n @torch.no_grad()\n @beartype\n def generate(\n self,\n prompt: Optional[Tensor] = None,\n batch_size: Optional[int] = None,\n filter_logits_fn: Callable = top_k,\n filter_kwargs: dict = dict(),\n temperature = 1.,\n return_codes = False,\n texts: Optional[List[str]] = None,\n text_embeds: Optional[Tensor] = None,\n cond_scale = 1.,\n cache_kv = True,\n max_seq_len = None,\n face_coords_to_file: Optional[Callable[[Tensor], Any]] = None\n ):\n max_seq_len = default(max_seq_len, self.max_seq_len)\n\n if exists(prompt):\n assert not exists(batch_size)\n\n prompt = rearrange(prompt, 'b ... 
-> b (...)')\n assert prompt.shape[-1] <= self.max_seq_len\n\n batch_size = prompt.shape[0]\n\n if self.condition_on_text:\n assert exists(texts) ^ exists(text_embeds), '`text` or `text_embeds` must be passed in if `condition_on_text` is set to True'\n if exists(texts):\n text_embeds = self.embed_texts(texts)\n\n batch_size = default(batch_size, text_embeds.shape[0])\n\n batch_size = default(batch_size, 1)\n\n codes = default(prompt, torch.empty((batch_size, 0), dtype = torch.long, device = self.device))\n\n curr_length = codes.shape[-1]\n\n cache = (None, None)\n\n for i in tqdm(range(curr_length, max_seq_len)):\n # v1([q1] [q2] [q1] [q2] [q1] [q2]) v2([eos| q1] [q2] [q1] [q2] [q1] [q2]) -> 0 1 2 3 4 5 6 7 8 9 10 11 12 -> v1(F F F F F F) v2(T F F F F F) v3(T F F F F F)\n\n can_eos = i != 0 and divisible_by(i, self.num_quantizers * 3) # only allow for eos to be decoded at the end of each face, defined as 3 vertices with D residual VQ codes\n\n output = self.forward_on_codes(\n codes,\n text_embeds = text_embeds,\n return_loss = False,\n return_cache = cache_kv,\n append_eos = False,\n cond_scale = cond_scale,\n cfg_routed_kwargs = dict(\n cache = cache\n )\n )\n\n if cache_kv:\n logits, cache = output\n\n if cond_scale == 1.:\n cache = (cache, None)\n else:\n logits = output\n\n logits = logits[:, -1]\n\n if not can_eos:\n logits[:, -1] = -torch.finfo(logits.dtype).max\n\n filtered_logits = filter_logits_fn(logits, **filter_kwargs)\n\n if temperature == 0.:\n sample = filtered_logits.argmax(dim = -1)\n else:\n probs = F.softmax(filtered_logits / temperature, dim = -1)\n sample = torch.multinomial(probs, 1)\n\n codes, _ = pack([codes, sample], 'b *')\n\n # check for all rows to have [eos] to terminate\n\n is_eos_codes = (codes == self.eos_token_id)\n\n if is_eos_codes.any(dim = -1).all():\n break\n\n # mask out to padding anything after the first eos\n\n mask = is_eos_codes.float().cumsum(dim = -1) >= 1\n codes = codes.masked_fill(mask, self.pad_id)\n\n # remove a potential extra token from eos, if breaked early\n\n code_len = codes.shape[-1]\n round_down_code_len = code_len // self.num_quantizers * self.num_quantizers\n codes = codes[:, :round_down_code_len]\n\n # early return of raw residual quantizer codes\n\n if return_codes:\n codes = rearrange(codes, 'b (n q) -> b n q', q = self.num_quantizers)\n return codes\n\n self.autoencoder.eval()\n face_coords, face_mask = self.autoencoder.decode_from_codes_to_faces(codes)\n\n if not exists(face_coords_to_file):\n return face_coords, face_mask\n\n files = [face_coords_to_file(coords[mask]) for coords, mask in zip(face_coords, face_mask)]\n return files\n\n def forward(\n self,\n *,\n vertices: TensorType['b', 'nv', 3, int],\n faces: TensorType['b', 'nf', 3, int],\n face_edges: Optional[TensorType['b', 'e', 2, int]] = None,\n codes: Optional[Tensor] = None,\n cache: Optional[LayerIntermediates] = None,\n **kwargs\n ):\n if not exists(codes):\n codes = self.autoencoder.tokenize(\n vertices = vertices,\n faces = faces,\n face_edges = face_edges\n )\n\n return self.forward_on_codes(codes, cache = cache, **kwargs)\n\n @classifier_free_guidance\n def forward_on_codes(\n self,\n codes = None,\n return_loss = True,\n return_cache = False,\n append_eos = True,\n cache = None,\n texts: Optional[List[str]] = None,\n text_embeds: Optional[Tensor] = None,\n cond_drop_prob = 0.\n ):\n # handle text conditions\n\n attn_context_kwargs = dict()\n\n if self.condition_on_text:\n assert exists(texts) ^ exists(text_embeds), '`text` or `text_embeds` must be passed 
in if `condition_on_text` is set to True'\n\n if exists(texts):\n text_embeds = self.conditioner.embed_texts(texts)\n\n if exists(codes):\n assert text_embeds.shape[0] == codes.shape[0], 'batch size of texts or text embeddings is not equal to the batch size of the mesh codes'\n\n _, maybe_dropped_text_embeds = self.conditioner(\n text_embeds = text_embeds,\n cond_drop_prob = cond_drop_prob\n )\n\n attn_context_kwargs = dict(\n context = maybe_dropped_text_embeds.embed,\n context_mask = maybe_dropped_text_embeds.mask\n )\n\n # take care of codes that may be flattened\n\n if codes.ndim > 2:\n codes = rearrange(codes, 'b ... -> b (...)')\n\n # get some variable\n\n batch, seq_len, device = *codes.shape, codes.device\n\n assert seq_len <= self.max_seq_len, f'received codes of length {seq_len} but needs to be less than or equal to set max_seq_len {self.max_seq_len}'\n\n # auto append eos token\n\n if append_eos:\n assert exists(codes)\n\n code_lens = ((codes == self.pad_id).cumsum(dim = -1) == 0).sum(dim = -1)\n\n codes = F.pad(codes, (0, 1), value = 0)\n\n batch_arange = torch.arange(batch, device = device)\n\n batch_arange = rearrange(batch_arange, '... -> ... 1')\n code_lens = rearrange(code_lens, '... -> ... 1')\n\n codes[batch_arange, code_lens] = self.eos_token_id\n\n # if returning loss, save the labels for cross entropy\n\n if return_loss:\n assert seq_len > 0\n codes, labels = codes[:, :-1], codes\n\n # token embed (each residual VQ id)\n\n codes = codes.masked_fill(codes == self.pad_id, 0)\n codes = self.token_embed(codes)\n\n # codebook embed + absolute positions\n\n seq_arange = torch.arange(codes.shape[-2], device = device)\n\n codes = codes + self.abs_pos_emb(seq_arange)\n\n # embedding for quantizer level\n\n code_len = codes.shape[1]\n\n level_embed = repeat(self.quantize_level_embed, 'q d -> (r q) d', r = ceil(code_len / self.num_quantizers))\n codes = codes + level_embed[:code_len]\n\n # embedding for each vertex\n\n vertex_embed = repeat(self.vertex_embed, 'nv d -> (r nv q) d', r = ceil(code_len / (3 * self.num_quantizers)), q = self.num_quantizers)\n codes = codes + vertex_embed[:code_len]\n\n # create a token per face, by summarizing the 3 vertices\n # this is similar in design to the RQ transformer from Lee et al. 
https://arxiv.org/abs/2203.01941\n\n num_tokens_per_face = self.num_quantizers * 3\n\n curr_vertex_pos = code_len % num_tokens_per_face # the current intra-face vertex-code position id, needed for caching at the fine decoder stage\n\n code_len_is_multiple_of_face = divisible_by(code_len, num_tokens_per_face)\n\n next_multiple_code_len = ceil(code_len / num_tokens_per_face) * num_tokens_per_face\n\n codes = pad_to_length(codes, next_multiple_code_len, dim = -2)\n\n # grouped codes will be used for the second stage\n\n grouped_codes = rearrange(codes, 'b (nf n) d -> b nf n d', n = num_tokens_per_face)\n\n # create the coarse tokens for the first attention network\n\n face_codes = grouped_codes if code_len_is_multiple_of_face else grouped_codes[:, :-1]\n face_codes = rearrange(face_codes, 'b nf n d -> b nf (n d)')\n face_codes = self.to_face_tokens(face_codes)\n\n face_codes_len = face_codes.shape[-2]\n\n # cache logic\n\n (\n cached_attended_face_codes,\n coarse_cache,\n fine_cache,\n coarse_gateloop_cache,\n fine_gateloop_cache\n ) = cache if exists(cache) else ((None,) * 5)\n\n if exists(cache):\n cached_face_codes_len = cached_attended_face_codes.shape[-2]\n need_call_first_transformer = face_codes_len > cached_face_codes_len\n else:\n need_call_first_transformer = True\n\n should_cache_fine = not divisible_by(curr_vertex_pos + 1, num_tokens_per_face)\n\n # attention on face codes (coarse)\n\n if need_call_first_transformer:\n if exists(self.coarse_gateloop_block):\n face_codes, coarse_gateloop_cache = self.coarse_gateloop_block(face_codes, cache = coarse_gateloop_cache)\n\n attended_face_codes, coarse_cache = self.decoder(\n face_codes,\n cache = coarse_cache,\n return_hiddens = True,\n **attn_context_kwargs\n )\n\n attended_face_codes = safe_cat((cached_attended_face_codes, attended_face_codes), dim = -2)\n else:\n attended_face_codes = cached_attended_face_codes\n\n # maybe project from coarse to fine dimension for hierarchical transformers\n\n attended_face_codes = self.maybe_project_coarse_to_fine(attended_face_codes)\n\n # auto prepend sos token\n\n sos = repeat(self.sos_token, 'd -> b d', b = batch)\n\n attended_face_codes_with_sos, _ = pack([sos, attended_face_codes], 'b * d')\n\n grouped_codes = pad_to_length(grouped_codes, attended_face_codes_with_sos.shape[-2], dim = 1)\n fine_vertex_codes, _ = pack([attended_face_codes_with_sos, grouped_codes], 'b n * d')\n\n fine_vertex_codes = fine_vertex_codes[..., :-1, :]\n\n # gateloop layers\n\n if exists(self.fine_gateloop_block):\n fine_vertex_codes = rearrange(fine_vertex_codes, 'b nf n d -> b (nf n) d')\n orig_length = fine_vertex_codes.shape[-2]\n fine_vertex_codes = fine_vertex_codes[:, :(code_len + 1)]\n\n fine_vertex_codes, fine_gateloop_cache = self.fine_gateloop_block(fine_vertex_codes, cache = fine_gateloop_cache)\n\n fine_vertex_codes = pad_to_length(fine_vertex_codes, orig_length, dim = -2)\n fine_vertex_codes = rearrange(fine_vertex_codes, 'b (nf n) d -> b nf n d', n = num_tokens_per_face)\n\n # fine attention - 2nd stage\n\n if exists(cache):\n fine_vertex_codes = fine_vertex_codes[:, -1:]\n\n if exists(fine_cache):\n for attn_intermediate in fine_cache.attn_intermediates:\n ck, cv = attn_intermediate.cached_kv\n ck, cv = map(lambda t: rearrange(t, '(b nf) ... 
-> b nf ...', b = batch), (ck, cv))\n ck, cv = map(lambda t: t[:, -1, :, :curr_vertex_pos], (ck, cv))\n attn_intermediate.cached_kv = (ck, cv)\n\n one_face = fine_vertex_codes.shape[1] == 1\n\n fine_vertex_codes = rearrange(fine_vertex_codes, 'b nf n d -> (b nf) n d')\n\n if one_face:\n fine_vertex_codes = fine_vertex_codes[:, :(curr_vertex_pos + 1)]\n\n attended_vertex_codes, fine_cache = self.fine_decoder(\n fine_vertex_codes,\n cache = fine_cache,\n return_hiddens = True\n )\n\n if not should_cache_fine:\n fine_cache = None\n\n if not one_face:\n # reconstitute original sequence\n\n embed = rearrange(attended_vertex_codes, '(b nf) n d -> b (nf n) d', b = batch)\n embed = embed[:, :(code_len + 1)]\n else:\n embed = attended_vertex_codes\n\n # logits\n\n logits = self.to_logits(embed)\n\n if not return_loss:\n if not return_cache:\n return logits\n\n next_cache = (\n attended_face_codes,\n coarse_cache,\n fine_cache,\n coarse_gateloop_cache,\n fine_gateloop_cache\n )\n\n return logits, next_cache\n\n # loss\n\n ce_loss = F.cross_entropy(\n rearrange(logits, 'b n c -> b c n'),\n labels,\n ignore_index = self.pad_id\n )\n\n return ce_loss" } ]
from pathlib import Path from functools import partial from packaging import version from contextlib import nullcontext, contextmanager from torch import nn, Tensor from torch.nn import Module from torch.utils.data import Dataset, DataLoader from torch.optim.lr_scheduler import _LRScheduler from pytorch_custom_utils import ( get_adam_optimizer, OptimizerWithWarmupSchedule, add_wandb_tracker_contextmanager ) from accelerate import Accelerator from accelerate.utils import DistributedDataParallelKwargs from beartype import beartype from beartype.door import is_bearable from beartype.typing import Optional, Tuple, Type, List from ema_pytorch import EMA from meshgpt_pytorch.data import custom_collate from meshgpt_pytorch.version import __version__ from meshgpt_pytorch.meshgpt_pytorch import ( MeshAutoencoder, MeshTransformer ) import torch import torch.nn.functional as F
11743
# constants DEFAULT_DDP_KWARGS = DistributedDataParallelKwargs( find_unused_parameters = True ) # helper functions def exists(v): return v is not None def default(v, d): return v if exists(v) else d def divisible_by(num, den): return (num % den) == 0 def cycle(dl): while True: for data in dl: yield data def maybe_del(d: dict, *keys): for key in keys: if key not in d: continue del d[key] # autoencoder trainer @add_wandb_tracker_contextmanager() class MeshAutoencoderTrainer(Module): @beartype def __init__( self, model: MeshAutoencoder, dataset: Dataset, num_train_steps: int, batch_size: int, grad_accum_every: int, val_dataset: Optional[Dataset] = None, val_every: int = 100, val_num_batches: int = 5, learning_rate: float = 1e-4, weight_decay: float = 0., max_grad_norm: Optional[float] = None, ema_kwargs: dict = dict(), scheduler: Optional[Type[_LRScheduler]] = None, scheduler_kwargs: dict = dict(), accelerator_kwargs: dict = dict(), optimizer_kwargs: dict = dict(), checkpoint_every = 1000, checkpoint_folder = './checkpoints', data_kwargs: Tuple[str, ...] = ['vertices', 'faces', 'face_edges'], warmup_steps = 1000, use_wandb_tracking = False ): super().__init__() # experiment tracker self.use_wandb_tracking = use_wandb_tracking if use_wandb_tracking: accelerator_kwargs['log_with'] = 'wandb' if 'kwargs_handlers' not in accelerator_kwargs: accelerator_kwargs['kwargs_handlers'] = [DEFAULT_DDP_KWARGS] # accelerator self.accelerator = Accelerator(**accelerator_kwargs) self.model = model if self.is_main: self.ema_model = EMA(model, **ema_kwargs) self.optimizer = OptimizerWithWarmupSchedule( accelerator = self.accelerator, optimizer = get_adam_optimizer(model.parameters(), lr = learning_rate, wd = weight_decay, **optimizer_kwargs), scheduler = scheduler, scheduler_kwargs = scheduler_kwargs, warmup_steps = warmup_steps, max_grad_norm = max_grad_norm ) self.dataloader = DataLoader( dataset, batch_size = batch_size, shuffle = True, drop_last = True,
# constants DEFAULT_DDP_KWARGS = DistributedDataParallelKwargs( find_unused_parameters = True ) # helper functions def exists(v): return v is not None def default(v, d): return v if exists(v) else d def divisible_by(num, den): return (num % den) == 0 def cycle(dl): while True: for data in dl: yield data def maybe_del(d: dict, *keys): for key in keys: if key not in d: continue del d[key] # autoencoder trainer @add_wandb_tracker_contextmanager() class MeshAutoencoderTrainer(Module): @beartype def __init__( self, model: MeshAutoencoder, dataset: Dataset, num_train_steps: int, batch_size: int, grad_accum_every: int, val_dataset: Optional[Dataset] = None, val_every: int = 100, val_num_batches: int = 5, learning_rate: float = 1e-4, weight_decay: float = 0., max_grad_norm: Optional[float] = None, ema_kwargs: dict = dict(), scheduler: Optional[Type[_LRScheduler]] = None, scheduler_kwargs: dict = dict(), accelerator_kwargs: dict = dict(), optimizer_kwargs: dict = dict(), checkpoint_every = 1000, checkpoint_folder = './checkpoints', data_kwargs: Tuple[str, ...] = ['vertices', 'faces', 'face_edges'], warmup_steps = 1000, use_wandb_tracking = False ): super().__init__() # experiment tracker self.use_wandb_tracking = use_wandb_tracking if use_wandb_tracking: accelerator_kwargs['log_with'] = 'wandb' if 'kwargs_handlers' not in accelerator_kwargs: accelerator_kwargs['kwargs_handlers'] = [DEFAULT_DDP_KWARGS] # accelerator self.accelerator = Accelerator(**accelerator_kwargs) self.model = model if self.is_main: self.ema_model = EMA(model, **ema_kwargs) self.optimizer = OptimizerWithWarmupSchedule( accelerator = self.accelerator, optimizer = get_adam_optimizer(model.parameters(), lr = learning_rate, wd = weight_decay, **optimizer_kwargs), scheduler = scheduler, scheduler_kwargs = scheduler_kwargs, warmup_steps = warmup_steps, max_grad_norm = max_grad_norm ) self.dataloader = DataLoader( dataset, batch_size = batch_size, shuffle = True, drop_last = True,
collate_fn = partial(custom_collate, pad_id = model.pad_id)
0
2023-11-29 14:58:15+00:00
16k
alvinliu0/HumanGaussian
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )" }, { "identifier": "BaseGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "contract_to_unisphere", "path": "threestudio/models/geometry/base.py", "snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x" }, { "identifier": "ImplicitSDF", "path": "threestudio/models/geometry/implicit_sdf.py", "snippet": "class ImplicitSDF(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: Union[\n float, str\n ] = 0.01 # in [float, \"progressive\"]\n shape_init: Optional[str] = None\n shape_init_params: Optional[Any] = None\n shape_init_mesh_up: str = \"+z\"\n shape_init_mesh_front: str = \"+x\"\n force_shape_init: bool = False\n sdf_bias: Union[float, str] = 0.0\n sdf_bias_params: Optional[Any] = None\n\n # no need to removal outlier for SDF\n isosurface_remove_outliers: bool = False\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.sdf_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n if self.cfg.isosurface_deformable_grid:\n assert (\n self.cfg.isosurface_method == \"mt\"\n ), \"isosurface_deformable_grid only works with 
mt\"\n self.deformation_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n self.finite_difference_normal_eps: Optional[float] = None\n\n def initialize_shape(self) -> None:\n if self.cfg.shape_init is None and not self.cfg.force_shape_init:\n return\n\n # do not initialize shape if weights are provided\n if self.cfg.weights is not None and not self.cfg.force_shape_init:\n return\n\n if self.cfg.sdf_bias != 0.0:\n threestudio.warn(\n \"shape_init and sdf_bias are both specified, which may lead to unexpected results.\"\n )\n\n get_gt_sdf: Callable[[Float[Tensor, \"N 3\"]], Float[Tensor, \"N 1\"]]\n assert isinstance(self.cfg.shape_init, str)\n if self.cfg.shape_init == \"ellipsoid\":\n assert (\n isinstance(self.cfg.shape_init_params, Sized)\n and len(self.cfg.shape_init_params) == 3\n )\n size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return ((points_rand / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n\n get_gt_sdf = func\n elif self.cfg.shape_init == \"sphere\":\n assert isinstance(self.cfg.shape_init_params, float)\n radius = self.cfg.shape_init_params\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius\n\n get_gt_sdf = func\n elif self.cfg.shape_init.startswith(\"mesh:\"):\n assert isinstance(self.cfg.shape_init_params, float)\n mesh_path = self.cfg.shape_init[5:]\n if not os.path.exists(mesh_path):\n raise ValueError(f\"Mesh file {mesh_path} does not exist.\")\n\n import trimesh\n\n scene = trimesh.load(mesh_path)\n if isinstance(scene, trimesh.Trimesh):\n mesh = scene\n elif isinstance(scene, trimesh.scene.Scene):\n mesh = trimesh.Trimesh()\n for obj in scene.geometry.values():\n mesh = trimesh.util.concatenate([mesh, obj])\n else:\n raise ValueError(f\"Unknown mesh type at {mesh_path}.\")\n\n # move to center\n centroid = mesh.vertices.mean(0)\n mesh.vertices = mesh.vertices - centroid\n\n # align to up-z and front-x\n dirs = [\"+x\", \"+y\", \"+z\", \"-x\", \"-y\", \"-z\"]\n dir2vec = {\n \"+x\": np.array([1, 0, 0]),\n \"+y\": np.array([0, 1, 0]),\n \"+z\": np.array([0, 0, 1]),\n \"-x\": np.array([-1, 0, 0]),\n \"-y\": np.array([0, -1, 0]),\n \"-z\": np.array([0, 0, -1]),\n }\n if (\n self.cfg.shape_init_mesh_up not in dirs\n or self.cfg.shape_init_mesh_front not in dirs\n ):\n raise ValueError(\n f\"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}.\"\n )\n if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:\n raise ValueError(\n \"shape_init_mesh_up and shape_init_mesh_front must be orthogonal.\"\n )\n z_, x_ = (\n dir2vec[self.cfg.shape_init_mesh_up],\n dir2vec[self.cfg.shape_init_mesh_front],\n )\n y_ = np.cross(z_, x_)\n std2mesh = np.stack([x_, y_, z_], axis=0).T\n mesh2std = np.linalg.inv(std2mesh)\n\n # scaling\n scale = np.abs(mesh.vertices).max()\n mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params\n mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T\n\n from pysdf import SDF\n\n sdf = SDF(mesh.vertices, mesh.faces)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n # add a negative signed here\n # as in pysdf the inside of the shape has positive signed distance\n return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(\n points_rand\n )[..., None]\n\n get_gt_sdf = func\n\n else:\n raise ValueError(\n 
f\"Unknown shape initialization type: {self.cfg.shape_init}\"\n )\n\n # Initialize SDF to a given shape when no weights are provided or force_shape_init is True\n optim = torch.optim.Adam(self.parameters(), lr=1e-3)\n from tqdm import tqdm\n\n for _ in tqdm(\n range(1000),\n desc=f\"Initializing SDF to a(n) {self.cfg.shape_init}:\",\n disable=get_rank() != 0,\n ):\n points_rand = (\n torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0\n )\n sdf_gt = get_gt_sdf(points_rand)\n sdf_pred = self.forward_sdf(points_rand)\n loss = F.mse_loss(sdf_pred, sdf_gt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # explicit broadcast to ensure param consistency across ranks\n for param in self.parameters():\n broadcast(param, src=0)\n\n def get_shifted_sdf(\n self, points: Float[Tensor, \"*N Di\"], sdf: Float[Tensor, \"*N 1\"]\n ) -> Float[Tensor, \"*N 1\"]:\n sdf_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.sdf_bias == \"ellipsoid\":\n assert (\n isinstance(self.cfg.sdf_bias_params, Sized)\n and len(self.cfg.sdf_bias_params) == 3\n )\n size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)\n sdf_bias = ((points / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n elif self.cfg.sdf_bias == \"sphere\":\n assert isinstance(self.cfg.sdf_bias_params, float)\n radius = self.cfg.sdf_bias_params\n sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius\n elif isinstance(self.cfg.sdf_bias, float):\n sdf_bias = self.cfg.sdf_bias\n else:\n raise ValueError(f\"Unknown sdf bias {self.cfg.sdf_bias}\")\n return sdf + sdf_bias\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n output = {\"sdf\": sdf}\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n assert self.finite_difference_normal_eps is not None\n eps: float = self.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 6 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (\n 0.5\n * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 
3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 3 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (sdf_offset[..., 0::1, 0] - sdf) / eps\n normal = F.normalize(sdf_grad, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n sdf_grad = normal\n elif self.cfg.normal_type == \"analytic\":\n sdf_grad = -torch.autograd.grad(\n sdf,\n points_unscaled,\n grad_outputs=torch.ones_like(sdf),\n create_graph=True,\n )[0]\n normal = F.normalize(sdf_grad, dim=-1)\n if not grad_enabled:\n sdf_grad = sdf_grad.detach()\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update(\n {\"normal\": normal, \"shading_normal\": normal, \"sdf_grad\": sdf_grad}\n )\n return output\n\n def forward_sdf(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n sdf = self.sdf_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n return sdf\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n if self.cfg.isosurface_deformable_grid:\n deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)\n return sdf, deformation\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n if isinstance(self.cfg.finite_difference_normal_eps, float):\n self.finite_difference_normal_eps = (\n self.cfg.finite_difference_normal_eps\n )\n elif self.cfg.finite_difference_normal_eps == \"progressive\":\n # progressive finite difference eps from Neuralangelo\n # https://arxiv.org/abs/2306.03092\n hg_conf: Any = self.cfg.pos_encoding_config\n assert (\n hg_conf.otype == \"ProgressiveBandHashGrid\"\n ), \"finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid\"\n current_level = min(\n hg_conf.start_level\n + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,\n hg_conf.n_levels,\n )\n grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (\n current_level - 1\n )\n grid_size = 2 * self.cfg.radius / grid_res\n if grid_size != 
self.finite_difference_normal_eps:\n threestudio.info(\n f\"Update finite_difference_normal_eps to {grid_size}\"\n )\n self.finite_difference_normal_eps = grid_size\n else:\n raise ValueError(\n f\"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}\"\n )" }, { "identifier": "ImplicitVolume", "path": "threestudio/models/geometry/implicit_volume.py", "snippet": "class ImplicitVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: float = 0.01\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.density_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n return raw_density, density\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n density = 
self.density_network(enc).view(*points.shape[:-1], 1)\n raw_density, density = self.get_activated_density(points_unscaled, density)\n\n output = {\n \"density\": density,\n }\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n # TODO: use raw density\n eps = self.cfg.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 6 1\"] = self.forward_density(\n points_offset\n )\n normal = (\n -0.5\n * (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 3 1\"] = self.forward_density(\n points_offset\n )\n normal = -(density_offset[..., 0::1, 0] - density) / eps\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"analytic\":\n normal = -torch.autograd.grad(\n density,\n points_unscaled,\n grad_outputs=torch.ones_like(density),\n create_graph=True,\n )[0]\n normal = F.normalize(normal, dim=-1)\n if not grad_enabled:\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n\n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n density = self.density_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n\n _, density = self.get_activated_density(points_unscaled, density)\n return density\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n if self.cfg.isosurface_deformable_grid:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support isosurface_deformable_grid. 
Ignoring.\"\n )\n density = self.forward_density(points)\n return density, None\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return -(field - threshold)\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"ImplicitVolume\":\n if isinstance(other, ImplicitVolume):\n instance = ImplicitVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {ImplicitVolume.__name__} from {other.__class__.__name__}\"\n )" }, { "identifier": "MarchingTetrahedraHelper", "path": "threestudio/models/isosurface.py", "snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n [4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / 
self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * -1\n )\n mapping[mask_edges] = torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh" }, { "identifier": "Mesh", "path": "threestudio/models/mesh.py", "snippet": "class Mesh:\n def 
__init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def 
v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n # t_nrm_idx is always the same as t_pos_idx\n vn_idx[i] = self.t_pos_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom / torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.detach().cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n atlas.generate(co, po)\n vmapping, indices, uvs = atlas.get_mesh(0)\n vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n 
assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss" }, { "identifier": "get_encoding", "path": "threestudio/models/networks.py", "snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding" }, { "identifier": "get_mlp", "path": "threestudio/models/networks.py", "snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network" }, { "identifier": "broadcast", "path": "threestudio/utils/misc.py", "snippet": "def broadcast(tensor, src=0):\n if not _distributed_available():\n return tensor\n else:\n torch.distributed.broadcast(tensor, src=src)\n return tensor" }, { "identifier": "scale_tensor", "path": "threestudio/utils/ops.py", "snippet": "def scale_tensor(\n dat: Num[Tensor, \"... 
D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat" } ]
import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio import trimesh from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.misc import broadcast from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * from pysdf import SDF
13769
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, )
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, )
self.mesh: Optional[Mesh] = None
6
2023-11-27 02:39:39+00:00
16k
EricGuo5513/momask-codes
train_res_transformer.py
[ { "identifier": "ResidualTransformer", "path": "models/mask_transformer/transformer.py", "snippet": "class ResidualTransformer(nn.Module):\n def __init__(self, code_dim, cond_mode, latent_dim=256, ff_size=1024, num_layers=8, cond_drop_prob=0.1,\n num_heads=4, dropout=0.1, clip_dim=512, shared_codebook=False, share_weight=False,\n clip_version=None, opt=None, **kargs):\n super(ResidualTransformer, self).__init__()\n print(f'latent_dim: {latent_dim}, ff_size: {ff_size}, nlayers: {num_layers}, nheads: {num_heads}, dropout: {dropout}')\n\n # assert shared_codebook == True, \"Only support shared codebook right now!\"\n\n self.code_dim = code_dim\n self.latent_dim = latent_dim\n self.clip_dim = clip_dim\n self.dropout = dropout\n self.opt = opt\n\n self.cond_mode = cond_mode\n # self.cond_drop_prob = cond_drop_prob\n\n if self.cond_mode == 'action':\n assert 'num_actions' in kargs\n self.num_actions = kargs.get('num_actions', 1)\n self.cond_drop_prob = cond_drop_prob\n\n '''\n Preparing Networks\n '''\n self.input_process = InputProcess(self.code_dim, self.latent_dim)\n self.position_enc = PositionalEncoding(self.latent_dim, self.dropout)\n\n seqTransEncoderLayer = nn.TransformerEncoderLayer(d_model=self.latent_dim,\n nhead=num_heads,\n dim_feedforward=ff_size,\n dropout=dropout,\n activation='gelu')\n\n self.seqTransEncoder = nn.TransformerEncoder(seqTransEncoderLayer,\n num_layers=num_layers)\n\n self.encode_quant = partial(F.one_hot, num_classes=self.opt.num_quantizers)\n self.encode_action = partial(F.one_hot, num_classes=self.num_actions)\n\n self.quant_emb = nn.Linear(self.opt.num_quantizers, self.latent_dim)\n # if self.cond_mode != 'no_cond':\n if self.cond_mode == 'text':\n self.cond_emb = nn.Linear(self.clip_dim, self.latent_dim)\n elif self.cond_mode == 'action':\n self.cond_emb = nn.Linear(self.num_actions, self.latent_dim)\n else:\n raise KeyError(\"Unsupported condition mode!!!\")\n\n\n _num_tokens = opt.num_tokens + 1 # one dummy tokens for padding\n self.pad_id = opt.num_tokens\n\n # self.output_process = OutputProcess_Bert(out_feats=opt.num_tokens, latent_dim=latent_dim)\n self.output_process = OutputProcess(out_feats=code_dim, latent_dim=latent_dim)\n\n if shared_codebook:\n token_embed = nn.Parameter(torch.normal(mean=0, std=0.02, size=(_num_tokens, code_dim)))\n self.token_embed_weight = token_embed.expand(opt.num_quantizers-1, _num_tokens, code_dim)\n if share_weight:\n self.output_proj_weight = self.token_embed_weight\n self.output_proj_bias = None\n else:\n output_proj = nn.Parameter(torch.normal(mean=0, std=0.02, size=(_num_tokens, code_dim)))\n output_bias = nn.Parameter(torch.zeros(size=(_num_tokens,)))\n # self.output_proj_bias = 0\n self.output_proj_weight = output_proj.expand(opt.num_quantizers-1, _num_tokens, code_dim)\n self.output_proj_bias = output_bias.expand(opt.num_quantizers-1, _num_tokens)\n\n else:\n if share_weight:\n self.embed_proj_shared_weight = nn.Parameter(torch.normal(mean=0, std=0.02, size=(opt.num_quantizers - 2, _num_tokens, code_dim)))\n self.token_embed_weight_ = nn.Parameter(torch.normal(mean=0, std=0.02, size=(1, _num_tokens, code_dim)))\n self.output_proj_weight_ = nn.Parameter(torch.normal(mean=0, std=0.02, size=(1, _num_tokens, code_dim)))\n self.output_proj_bias = None\n self.registered = False\n else:\n output_proj_weight = torch.normal(mean=0, std=0.02,\n size=(opt.num_quantizers - 1, _num_tokens, code_dim))\n\n self.output_proj_weight = nn.Parameter(output_proj_weight)\n self.output_proj_bias = 
nn.Parameter(torch.zeros(size=(opt.num_quantizers, _num_tokens)))\n token_embed_weight = torch.normal(mean=0, std=0.02,\n size=(opt.num_quantizers - 1, _num_tokens, code_dim))\n self.token_embed_weight = nn.Parameter(token_embed_weight)\n\n self.apply(self.__init_weights)\n self.shared_codebook = shared_codebook\n self.share_weight = share_weight\n\n if self.cond_mode == 'text':\n print('Loading CLIP...')\n self.clip_version = clip_version\n self.clip_model = self.load_and_freeze_clip(clip_version)\n\n # def\n\n def mask_cond(self, cond, force_mask=False):\n bs, d = cond.shape\n if force_mask:\n return torch.zeros_like(cond)\n elif self.training and self.cond_drop_prob > 0.:\n mask = torch.bernoulli(torch.ones(bs, device=cond.device) * self.cond_drop_prob).view(bs, 1)\n return cond * (1. - mask)\n else:\n return cond\n\n def __init_weights(self, module):\n if isinstance(module, (nn.Linear, nn.Embedding)):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n def parameters_wo_clip(self):\n return [p for name, p in self.named_parameters() if not name.startswith('clip_model.')]\n\n def load_and_freeze_clip(self, clip_version):\n clip_model, clip_preprocess = clip.load(clip_version, device='cpu',\n jit=False) # Must set jit=False for training\n # Cannot run on cpu\n clip.model.convert_weights(\n clip_model) # Actually this line is unnecessary since clip by default already on float16\n # Date 0707: It's necessary, only unecessary when load directly to gpu. Disable if need to run on cpu\n\n # Freeze CLIP weights\n clip_model.eval()\n for p in clip_model.parameters():\n p.requires_grad = False\n\n return clip_model\n\n def encode_text(self, raw_text):\n device = next(self.parameters()).device\n text = clip.tokenize(raw_text, truncate=True).to(device)\n feat_clip_text = self.clip_model.encode_text(text).float()\n return feat_clip_text\n\n\n def q_schedule(self, bs, low, high):\n noise = uniform((bs,), device=self.opt.device)\n schedule = 1 - cosine_schedule(noise)\n return torch.round(schedule * (high - low)) + low\n\n def process_embed_proj_weight(self):\n if self.share_weight and (not self.shared_codebook):\n # if not self.registered:\n self.output_proj_weight = torch.cat([self.embed_proj_shared_weight, self.output_proj_weight_], dim=0)\n self.token_embed_weight = torch.cat([self.token_embed_weight_, self.embed_proj_shared_weight], dim=0)\n # self.registered = True\n\n def output_project(self, logits, qids):\n '''\n :logits: (bs, code_dim, seqlen)\n :qids: (bs)\n\n :return:\n -logits (bs, ntoken, seqlen)\n '''\n # (num_qlayers-1, num_token, code_dim) -> (bs, ntoken, code_dim)\n output_proj_weight = self.output_proj_weight[qids]\n # (num_qlayers, ntoken) -> (bs, ntoken)\n output_proj_bias = None if self.output_proj_bias is None else self.output_proj_bias[qids]\n\n output = torch.einsum('bnc, bcs->bns', output_proj_weight, logits)\n if output_proj_bias is not None:\n output += output + output_proj_bias.unsqueeze(-1)\n return output\n\n\n\n def trans_forward(self, motion_codes, qids, cond, padding_mask, force_mask=False):\n '''\n :param motion_codes: (b, seqlen, d)\n :padding_mask: (b, seqlen), all pad positions are TRUE else FALSE\n :param qids: (b), quantizer layer ids\n :param cond: (b, embed_dim) for text, (b, num_actions) for action\n :return:\n -logits: (b, num_token, seqlen)\n '''\n cond = 
self.mask_cond(cond, force_mask=force_mask)\n\n # (b, seqlen, d) -> (seqlen, b, latent_dim)\n x = self.input_process(motion_codes)\n\n # (b, num_quantizer)\n q_onehot = self.encode_quant(qids).float().to(x.device)\n\n q_emb = self.quant_emb(q_onehot).unsqueeze(0) # (1, b, latent_dim)\n cond = self.cond_emb(cond).unsqueeze(0) # (1, b, latent_dim)\n\n x = self.position_enc(x)\n xseq = torch.cat([cond, q_emb, x], dim=0) # (seqlen+2, b, latent_dim)\n\n padding_mask = torch.cat([torch.zeros_like(padding_mask[:, 0:2]), padding_mask], dim=1) # (b, seqlen+2)\n output = self.seqTransEncoder(xseq, src_key_padding_mask=padding_mask)[2:] # (seqlen, b, e)\n logits = self.output_process(output)\n return logits\n\n def forward_with_cond_scale(self,\n motion_codes,\n q_id,\n cond_vector,\n padding_mask,\n cond_scale=3,\n force_mask=False):\n bs = motion_codes.shape[0]\n # if cond_scale == 1:\n qids = torch.full((bs,), q_id, dtype=torch.long, device=motion_codes.device)\n if force_mask:\n logits = self.trans_forward(motion_codes, qids, cond_vector, padding_mask, force_mask=True)\n logits = self.output_project(logits, qids-1)\n return logits\n\n logits = self.trans_forward(motion_codes, qids, cond_vector, padding_mask)\n logits = self.output_project(logits, qids-1)\n if cond_scale == 1:\n return logits\n\n aux_logits = self.trans_forward(motion_codes, qids, cond_vector, padding_mask, force_mask=True)\n aux_logits = self.output_project(aux_logits, qids-1)\n\n scaled_logits = aux_logits + (logits - aux_logits) * cond_scale\n return scaled_logits\n\n def forward(self, all_indices, y, m_lens):\n '''\n :param all_indices: (b, n, q)\n :param y: raw text for cond_mode=text, (b, ) for cond_mode=action\n :m_lens: (b,)\n :return:\n '''\n\n self.process_embed_proj_weight()\n\n bs, ntokens, num_quant_layers = all_indices.shape\n device = all_indices.device\n\n # Positions that are PADDED are ALL FALSE\n non_pad_mask = lengths_to_mask(m_lens, ntokens) # (b, n)\n\n q_non_pad_mask = repeat(non_pad_mask, 'b n -> b n q', q=num_quant_layers)\n all_indices = torch.where(q_non_pad_mask, all_indices, self.pad_id) #(b, n, q)\n\n # randomly sample quantization layers to work on, [1, num_q)\n active_q_layers = q_schedule(bs, low=1, high=num_quant_layers, device=device)\n\n # print(self.token_embed_weight.shape, all_indices.shape)\n token_embed = repeat(self.token_embed_weight, 'q c d-> b c d q', b=bs)\n gather_indices = repeat(all_indices[..., :-1], 'b n q -> b n d q', d=token_embed.shape[2])\n # print(token_embed.shape, gather_indices.shape)\n all_codes = token_embed.gather(1, gather_indices) # (b, n, d, q-1)\n\n cumsum_codes = torch.cumsum(all_codes, dim=-1) #(b, n, d, q-1)\n\n active_indices = all_indices[torch.arange(bs), :, active_q_layers] # (b, n)\n history_sum = cumsum_codes[torch.arange(bs), :, :, active_q_layers - 1]\n\n force_mask = False\n if self.cond_mode == 'text':\n with torch.no_grad():\n cond_vector = self.encode_text(y)\n elif self.cond_mode == 'action':\n cond_vector = self.enc_action(y).to(device).float()\n elif self.cond_mode == 'uncond':\n cond_vector = torch.zeros(bs, self.latent_dim).float().to(device)\n force_mask = True\n else:\n raise NotImplementedError(\"Unsupported condition mode!!!\")\n\n logits = self.trans_forward(history_sum, active_q_layers, cond_vector, ~non_pad_mask, force_mask)\n logits = self.output_project(logits, active_q_layers-1)\n ce_loss, pred_id, acc = cal_performance(logits, active_indices, ignore_index=self.pad_id)\n\n return ce_loss, pred_id, acc\n\n @torch.no_grad()\n 
@eval_decorator\n def generate(self,\n motion_ids,\n conds,\n m_lens,\n temperature=1,\n topk_filter_thres=0.9,\n cond_scale=2,\n num_res_layers=-1, # If it's -1, use all.\n ):\n\n # print(self.opt.num_quantizers)\n # assert len(timesteps) >= len(cond_scales) == self.opt.num_quantizers\n self.process_embed_proj_weight()\n\n device = next(self.parameters()).device\n seq_len = motion_ids.shape[1]\n batch_size = len(conds)\n\n if self.cond_mode == 'text':\n with torch.no_grad():\n cond_vector = self.encode_text(conds)\n elif self.cond_mode == 'action':\n cond_vector = self.enc_action(conds).to(device)\n elif self.cond_mode == 'uncond':\n cond_vector = torch.zeros(batch_size, self.latent_dim).float().to(device)\n else:\n raise NotImplementedError(\"Unsupported condition mode!!!\")\n\n # token_embed = repeat(self.token_embed_weight, 'c d -> b c d', b=batch_size)\n # gathered_ids = repeat(motion_ids, 'b n -> b n d', d=token_embed.shape[-1])\n # history_sum = token_embed.gather(1, gathered_ids)\n\n # print(pa, seq_len)\n padding_mask = ~lengths_to_mask(m_lens, seq_len)\n # print(padding_mask.shape, motion_ids.shape)\n motion_ids = torch.where(padding_mask, self.pad_id, motion_ids)\n all_indices = [motion_ids]\n history_sum = 0\n num_quant_layers = self.opt.num_quantizers if num_res_layers==-1 else num_res_layers+1\n\n for i in range(1, num_quant_layers):\n # print(f\"--> Working on {i}-th quantizer\")\n # Start from all tokens being masked\n # qids = torch.full((batch_size,), i, dtype=torch.long, device=motion_ids.device)\n token_embed = self.token_embed_weight[i-1]\n token_embed = repeat(token_embed, 'c d -> b c d', b=batch_size)\n gathered_ids = repeat(motion_ids, 'b n -> b n d', d=token_embed.shape[-1])\n history_sum += token_embed.gather(1, gathered_ids)\n\n logits = self.forward_with_cond_scale(history_sum, i, cond_vector, padding_mask, cond_scale=cond_scale)\n # logits = self.trans_forward(history_sum, qids, cond_vector, padding_mask)\n\n logits = logits.permute(0, 2, 1) # (b, seqlen, ntoken)\n # clean low prob token\n filtered_logits = top_k(logits, topk_filter_thres, dim=-1)\n\n pred_ids = gumbel_sample(filtered_logits, temperature=temperature, dim=-1) # (b, seqlen)\n\n # probs = F.softmax(filtered_logits, dim=-1) # (b, seqlen, ntoken)\n # # print(temperature, starting_temperature, steps_until_x0, timesteps)\n # # print(probs / temperature)\n # pred_ids = Categorical(probs / temperature).sample() # (b, seqlen)\n\n ids = torch.where(padding_mask, self.pad_id, pred_ids)\n\n motion_ids = ids\n all_indices.append(ids)\n\n all_indices = torch.stack(all_indices, dim=-1)\n # padding_mask = repeat(padding_mask, 'b n -> b n q', q=all_indices.shape[-1])\n # all_indices = torch.where(padding_mask, -1, all_indices)\n all_indices = torch.where(all_indices==self.pad_id, -1, all_indices)\n # all_indices = all_indices.masked_fill()\n return all_indices\n\n @torch.no_grad()\n @eval_decorator\n def edit(self,\n motion_ids,\n conds,\n m_lens,\n temperature=1,\n topk_filter_thres=0.9,\n cond_scale=2\n ):\n\n # print(self.opt.num_quantizers)\n # assert len(timesteps) >= len(cond_scales) == self.opt.num_quantizers\n self.process_embed_proj_weight()\n\n device = next(self.parameters()).device\n seq_len = motion_ids.shape[1]\n batch_size = len(conds)\n\n if self.cond_mode == 'text':\n with torch.no_grad():\n cond_vector = self.encode_text(conds)\n elif self.cond_mode == 'action':\n cond_vector = self.enc_action(conds).to(device)\n elif self.cond_mode == 'uncond':\n cond_vector = torch.zeros(batch_size, 
self.latent_dim).float().to(device)\n else:\n raise NotImplementedError(\"Unsupported condition mode!!!\")\n\n # token_embed = repeat(self.token_embed_weight, 'c d -> b c d', b=batch_size)\n # gathered_ids = repeat(motion_ids, 'b n -> b n d', d=token_embed.shape[-1])\n # history_sum = token_embed.gather(1, gathered_ids)\n\n # print(pa, seq_len)\n padding_mask = ~lengths_to_mask(m_lens, seq_len)\n # print(padding_mask.shape, motion_ids.shape)\n motion_ids = torch.where(padding_mask, self.pad_id, motion_ids)\n all_indices = [motion_ids]\n history_sum = 0\n\n for i in range(1, self.opt.num_quantizers):\n # print(f\"--> Working on {i}-th quantizer\")\n # Start from all tokens being masked\n # qids = torch.full((batch_size,), i, dtype=torch.long, device=motion_ids.device)\n token_embed = self.token_embed_weight[i-1]\n token_embed = repeat(token_embed, 'c d -> b c d', b=batch_size)\n gathered_ids = repeat(motion_ids, 'b n -> b n d', d=token_embed.shape[-1])\n history_sum += token_embed.gather(1, gathered_ids)\n\n logits = self.forward_with_cond_scale(history_sum, i, cond_vector, padding_mask, cond_scale=cond_scale)\n # logits = self.trans_forward(history_sum, qids, cond_vector, padding_mask)\n\n logits = logits.permute(0, 2, 1) # (b, seqlen, ntoken)\n # clean low prob token\n filtered_logits = top_k(logits, topk_filter_thres, dim=-1)\n\n pred_ids = gumbel_sample(filtered_logits, temperature=temperature, dim=-1) # (b, seqlen)\n\n # probs = F.softmax(filtered_logits, dim=-1) # (b, seqlen, ntoken)\n # # print(temperature, starting_temperature, steps_until_x0, timesteps)\n # # print(probs / temperature)\n # pred_ids = Categorical(probs / temperature).sample() # (b, seqlen)\n\n ids = torch.where(padding_mask, self.pad_id, pred_ids)\n\n motion_ids = ids\n all_indices.append(ids)\n\n all_indices = torch.stack(all_indices, dim=-1)\n # padding_mask = repeat(padding_mask, 'b n -> b n q', q=all_indices.shape[-1])\n # all_indices = torch.where(padding_mask, -1, all_indices)\n all_indices = torch.where(all_indices==self.pad_id, -1, all_indices)\n # all_indices = all_indices.masked_fill()\n return all_indices" }, { "identifier": "ResidualTransformerTrainer", "path": "models/mask_transformer/transformer_trainer.py", "snippet": "class ResidualTransformerTrainer:\n def __init__(self, args, res_transformer, vq_model):\n self.opt = args\n self.res_transformer = res_transformer\n self.vq_model = vq_model\n self.device = args.device\n self.vq_model.eval()\n\n if args.is_train:\n self.logger = SummaryWriter(args.log_dir)\n # self.l1_criterion = torch.nn.SmoothL1Loss()\n\n\n def update_lr_warm_up(self, nb_iter, warm_up_iter, lr):\n\n current_lr = lr * (nb_iter + 1) / (warm_up_iter + 1)\n for param_group in self.opt_res_transformer.param_groups:\n param_group[\"lr\"] = current_lr\n\n return current_lr\n\n\n def forward(self, batch_data):\n\n conds, motion, m_lens = batch_data\n motion = motion.detach().float().to(self.device)\n m_lens = m_lens.detach().long().to(self.device)\n\n # (b, n, q), (q, b, n ,d)\n code_idx, all_codes = self.vq_model.encode(motion)\n m_lens = m_lens // 4\n\n conds = conds.to(self.device).float() if torch.is_tensor(conds) else conds\n\n ce_loss, pred_ids, acc = self.res_transformer(code_idx, conds, m_lens)\n\n return ce_loss, acc\n\n def update(self, batch_data):\n loss, acc = self.forward(batch_data)\n\n self.opt_res_transformer.zero_grad()\n loss.backward()\n self.opt_res_transformer.step()\n self.scheduler.step()\n\n return loss.item(), acc\n\n def save(self, file_name, ep, total_it):\n 
res_trans_state_dict = self.res_transformer.state_dict()\n clip_weights = [e for e in res_trans_state_dict.keys() if e.startswith('clip_model.')]\n for e in clip_weights:\n del res_trans_state_dict[e]\n state = {\n 'res_transformer': res_trans_state_dict,\n 'opt_res_transformer': self.opt_res_transformer.state_dict(),\n 'scheduler':self.scheduler.state_dict(),\n 'ep': ep,\n 'total_it': total_it,\n }\n torch.save(state, file_name)\n\n def resume(self, model_dir):\n checkpoint = torch.load(model_dir, map_location=self.device)\n missing_keys, unexpected_keys = self.res_transformer.load_state_dict(checkpoint['res_transformer'], strict=False)\n assert len(unexpected_keys) == 0\n assert all([k.startswith('clip_model.') for k in missing_keys])\n\n try:\n self.opt_res_transformer.load_state_dict(checkpoint['opt_res_transformer']) # Optimizer\n\n self.scheduler.load_state_dict(checkpoint['scheduler']) # Scheduler\n except:\n print('Resume wo optimizer')\n return checkpoint['ep'], checkpoint['total_it']\n\n def train(self, train_loader, val_loader, eval_val_loader, eval_wrapper, plot_eval):\n self.res_transformer.to(self.device)\n self.vq_model.to(self.device)\n\n self.opt_res_transformer = optim.AdamW(self.res_transformer.parameters(), betas=(0.9, 0.99), lr=self.opt.lr, weight_decay=1e-5)\n self.scheduler = optim.lr_scheduler.MultiStepLR(self.opt_res_transformer,\n milestones=self.opt.milestones,\n gamma=self.opt.gamma)\n\n epoch = 0\n it = 0\n\n if self.opt.is_continue:\n model_dir = pjoin(self.opt.model_dir, 'latest.tar') # TODO\n epoch, it = self.resume(model_dir)\n print(\"Load model epoch:%d iterations:%d\"%(epoch, it))\n\n start_time = time.time()\n total_iters = self.opt.max_epoch * len(train_loader)\n print(f'Total Epochs: {self.opt.max_epoch}, Total Iters: {total_iters}')\n print('Iters Per Epoch, Training: %04d, Validation: %03d' % (len(train_loader), len(val_loader)))\n logs = defaultdict(def_value, OrderedDict())\n\n best_fid, best_div, best_top1, best_top2, best_top3, best_matching, writer = evaluation_res_transformer(\n self.opt.save_root, eval_val_loader, self.res_transformer, self.vq_model, self.logger, epoch,\n best_fid=100, best_div=100,\n best_top1=0, best_top2=0, best_top3=0,\n best_matching=100, eval_wrapper=eval_wrapper,\n plot_func=plot_eval, save_ckpt=False, save_anim=False\n )\n best_loss = 100\n best_acc = 0\n\n while epoch < self.opt.max_epoch:\n self.res_transformer.train()\n self.vq_model.eval()\n\n for i, batch in enumerate(train_loader):\n it += 1\n if it < self.opt.warm_up_iter:\n self.update_lr_warm_up(it, self.opt.warm_up_iter, self.opt.lr)\n\n loss, acc = self.update(batch_data=batch)\n logs['loss'] += loss\n logs[\"acc\"] += acc\n logs['lr'] += self.opt_res_transformer.param_groups[0]['lr']\n\n if it % self.opt.log_every == 0:\n mean_loss = OrderedDict()\n # self.logger.add_scalar('val_loss', val_loss, it)\n # self.l\n for tag, value in logs.items():\n self.logger.add_scalar('Train/%s'%tag, value / self.opt.log_every, it)\n mean_loss[tag] = value / self.opt.log_every\n logs = defaultdict(def_value, OrderedDict())\n print_current_loss(start_time, it, total_iters, mean_loss, epoch=epoch, inner_iter=i)\n\n if it % self.opt.save_latest == 0:\n self.save(pjoin(self.opt.model_dir, 'latest.tar'), epoch, it)\n\n epoch += 1\n self.save(pjoin(self.opt.model_dir, 'latest.tar'), epoch, it)\n\n print('Validation time:')\n self.vq_model.eval()\n self.res_transformer.eval()\n\n val_loss = []\n val_acc = []\n with torch.no_grad():\n for i, batch_data in enumerate(val_loader):\n 
loss, acc = self.forward(batch_data)\n val_loss.append(loss.item())\n val_acc.append(acc)\n\n print(f\"Validation loss:{np.mean(val_loss):.3f}, Accuracy:{np.mean(val_acc):.3f}\")\n\n self.logger.add_scalar('Val/loss', np.mean(val_loss), epoch)\n self.logger.add_scalar('Val/acc', np.mean(val_acc), epoch)\n\n if np.mean(val_loss) < best_loss:\n print(f\"Improved loss from {best_loss:.02f} to {np.mean(val_loss)}!!!\")\n self.save(pjoin(self.opt.model_dir, 'net_best_loss.tar'), epoch, it)\n best_loss = np.mean(val_loss)\n\n if np.mean(val_acc) > best_acc:\n print(f\"Improved acc from {best_acc:.02f} to {np.mean(val_acc)}!!!\")\n # self.save(pjoin(self.opt.model_dir, 'net_best_loss.tar'), epoch, it)\n best_acc = np.mean(val_acc)\n\n best_fid, best_div, best_top1, best_top2, best_top3, best_matching, writer = evaluation_res_transformer(\n self.opt.save_root, eval_val_loader, self.res_transformer, self.vq_model, self.logger, epoch, best_fid=best_fid,\n best_div=best_div, best_top1=best_top1, best_top2=best_top2, best_top3=best_top3,\n best_matching=best_matching, eval_wrapper=eval_wrapper,\n plot_func=plot_eval, save_ckpt=True, save_anim=(epoch%self.opt.eval_every_e==0)\n )" }, { "identifier": "RVQVAE", "path": "models/vq/model.py", "snippet": "class RVQVAE(nn.Module):\n def __init__(self,\n args,\n input_width=263,\n nb_code=1024,\n code_dim=512,\n output_emb_width=512,\n down_t=3,\n stride_t=2,\n width=512,\n depth=3,\n dilation_growth_rate=3,\n activation='relu',\n norm=None):\n\n super().__init__()\n assert output_emb_width == code_dim\n self.code_dim = code_dim\n self.num_code = nb_code\n # self.quant = args.quantizer\n self.encoder = Encoder(input_width, output_emb_width, down_t, stride_t, width, depth,\n dilation_growth_rate, activation=activation, norm=norm)\n self.decoder = Decoder(input_width, output_emb_width, down_t, stride_t, width, depth,\n dilation_growth_rate, activation=activation, norm=norm)\n rvqvae_config = {\n 'num_quantizers': args.num_quantizers,\n 'shared_codebook': args.shared_codebook,\n 'quantize_dropout_prob': args.quantize_dropout_prob,\n 'quantize_dropout_cutoff_index': 0,\n 'nb_code': nb_code,\n 'code_dim':code_dim, \n 'args': args,\n }\n self.quantizer = ResidualVQ(**rvqvae_config)\n\n def preprocess(self, x):\n # (bs, T, Jx3) -> (bs, Jx3, T)\n x = x.permute(0, 2, 1).float()\n return x\n\n def postprocess(self, x):\n # (bs, Jx3, T) -> (bs, T, Jx3)\n x = x.permute(0, 2, 1)\n return x\n\n def encode(self, x):\n N, T, _ = x.shape\n x_in = self.preprocess(x)\n x_encoder = self.encoder(x_in)\n # print(x_encoder.shape)\n code_idx, all_codes = self.quantizer.quantize(x_encoder, return_latent=True)\n # print(code_idx.shape)\n # code_idx = code_idx.view(N, -1)\n # (N, T, Q)\n # print()\n return code_idx, all_codes\n\n def forward(self, x):\n x_in = self.preprocess(x)\n # Encode\n x_encoder = self.encoder(x_in)\n\n ## quantization\n # x_quantized, code_idx, commit_loss, perplexity = self.quantizer(x_encoder, sample_codebook_temp=0.5,\n # force_dropout_index=0) #TODO hardcode\n x_quantized, code_idx, commit_loss, perplexity = self.quantizer(x_encoder, sample_codebook_temp=0.5)\n\n # print(code_idx[0, :, 1])\n ## decoder\n x_out = self.decoder(x_quantized)\n # x_out = self.postprocess(x_decoder)\n return x_out, commit_loss, perplexity\n\n def forward_decoder(self, x):\n x_d = self.quantizer.get_codes_from_indices(x)\n # x_d = x_d.view(1, -1, self.code_dim).permute(0, 2, 1).contiguous()\n x = x_d.sum(dim=0).permute(0, 2, 1)\n\n # decoder\n x_out = self.decoder(x)\n # x_out = 
self.postprocess(x_decoder)\n return x_out" }, { "identifier": "TrainT2MOptions", "path": "options/train_option.py", "snippet": "class TrainT2MOptions(BaseOptions):\n def initialize(self):\n BaseOptions.initialize(self)\n self.parser.add_argument('--batch_size', type=int, default=64, help='Batch size')\n self.parser.add_argument('--max_epoch', type=int, default=500, help='Maximum number of epoch for training')\n # self.parser.add_argument('--max_iters', type=int, default=150_000, help='Training iterations')\n\n '''LR scheduler'''\n self.parser.add_argument('--lr', type=float, default=2e-4, help='Learning rate')\n self.parser.add_argument('--gamma', type=float, default=0.1, help='Learning rate schedule factor')\n self.parser.add_argument('--milestones', default=[50_000], nargs=\"+\", type=int,\n help=\"learning rate schedule (iterations)\")\n self.parser.add_argument('--warm_up_iter', default=2000, type=int, help='number of total iterations for warmup')\n\n '''Condition'''\n self.parser.add_argument('--cond_drop_prob', type=float, default=0.1, help='Drop ratio of condition, for classifier-free guidance')\n self.parser.add_argument(\"--seed\", default=3407, type=int, help=\"Seed\")\n\n self.parser.add_argument('--is_continue', action=\"store_true\", help='Is this trial continuing previous state?')\n self.parser.add_argument('--gumbel_sample', action=\"store_true\", help='Strategy for token sampling, True: Gumbel sampling, False: Categorical sampling')\n self.parser.add_argument('--share_weight', action=\"store_true\", help='Whether to share weight for projection/embedding, for residual transformer.')\n\n self.parser.add_argument('--log_every', type=int, default=50, help='Frequency of printing training progress, (iteration)')\n # self.parser.add_argument('--save_every_e', type=int, default=100, help='Frequency of printing training progress')\n self.parser.add_argument('--eval_every_e', type=int, default=10, help='Frequency of animating eval results, (epoch)')\n self.parser.add_argument('--save_latest', type=int, default=500, help='Frequency of saving checkpoint, (iteration)')\n\n\n self.is_train = True" }, { "identifier": "plot_3d_motion", "path": "utils/plot_script.py", "snippet": "def plot_3d_motion(save_path, kinematic_tree, joints, title, figsize=(10, 10), fps=120, radius=4):\n matplotlib.use('Agg')\n\n title_sp = title.split(' ')\n if len(title_sp) > 20:\n title = '\\n'.join([' '.join(title_sp[:10]), ' '.join(title_sp[10:20]), ' '.join(title_sp[20:])])\n elif len(title_sp) > 10:\n title = '\\n'.join([' '.join(title_sp[:10]), ' '.join(title_sp[10:])])\n\n def init():\n ax.set_xlim3d([-radius / 2, radius / 2])\n ax.set_ylim3d([0, radius])\n ax.set_zlim3d([0, radius])\n # print(title)\n fig.suptitle(title, fontsize=20)\n ax.grid(b=False)\n\n def plot_xzPlane(minx, maxx, miny, minz, maxz):\n ## Plot a plane XZ\n verts = [\n [minx, miny, minz],\n [minx, miny, maxz],\n [maxx, miny, maxz],\n [maxx, miny, minz]\n ]\n xz_plane = Poly3DCollection([verts])\n xz_plane.set_facecolor((0.5, 0.5, 0.5, 0.5))\n ax.add_collection3d(xz_plane)\n\n # return ax\n\n # (seq_len, joints_num, 3)\n data = joints.copy().reshape(len(joints), -1, 3)\n fig = plt.figure(figsize=figsize)\n ax = p3.Axes3D(fig)\n init()\n MINS = data.min(axis=0).min(axis=0)\n MAXS = data.max(axis=0).max(axis=0)\n colors = ['red', 'blue', 'black', 'red', 'blue',\n 'darkblue', 'darkblue', 'darkblue', 'darkblue', 'darkblue',\n 'darkred', 'darkred', 'darkred', 'darkred', 'darkred']\n frame_number = data.shape[0]\n # print(data.shape)\n\n 
height_offset = MINS[1]\n data[:, :, 1] -= height_offset\n trajec = data[:, 0, [0, 2]]\n\n data[..., 0] -= data[:, 0:1, 0]\n data[..., 2] -= data[:, 0:1, 2]\n\n # print(trajec.shape)\n\n def update(index):\n # print(index)\n ax.lines = []\n ax.collections = []\n ax.view_init(elev=120, azim=-90)\n ax.dist = 7.5\n # ax =\n plot_xzPlane(MINS[0] - trajec[index, 0], MAXS[0] - trajec[index, 0], 0, MINS[2] - trajec[index, 1],\n MAXS[2] - trajec[index, 1])\n # ax.scatter(data[index, :22, 0], data[index, :22, 1], data[index, :22, 2], color='black', s=3)\n\n if index > 1:\n ax.plot3D(trajec[:index, 0] - trajec[index, 0], np.zeros_like(trajec[:index, 0]),\n trajec[:index, 1] - trajec[index, 1], linewidth=1.0,\n color='blue')\n # ax = plot_xzPlane(ax, MINS[0], MAXS[0], 0, MINS[2], MAXS[2])\n\n for i, (chain, color) in enumerate(zip(kinematic_tree, colors)):\n # print(color)\n if i < 5:\n linewidth = 4.0\n else:\n linewidth = 2.0\n ax.plot3D(data[index, chain, 0], data[index, chain, 1], data[index, chain, 2], linewidth=linewidth,\n color=color)\n # print(trajec[:index, 0].shape)\n\n plt.axis('off')\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n\n ani = FuncAnimation(fig, update, frames=frame_number, interval=1000 / fps, repeat=False)\n\n # writer = FFMpegFileWriter(fps=fps)\n ani.save(save_path, fps=fps)\n plt.close()" }, { "identifier": "recover_from_ric", "path": "utils/motion_process.py", "snippet": "def recover_from_ric(data, joints_num):\n r_rot_quat, r_pos = recover_root_rot_pos(data)\n positions = data[..., 4:(joints_num - 1) * 3 + 4]\n positions = positions.view(positions.shape[:-1] + (-1, 3))\n\n '''Add Y-axis rotation to local joints'''\n positions = qrot(qinv(r_rot_quat[..., None, :]).expand(positions.shape[:-1] + (4,)), positions)\n\n '''Add root XZ to joints'''\n positions[..., 0] += r_pos[..., 0:1]\n positions[..., 2] += r_pos[..., 2:3]\n\n '''Concate root and joints'''\n positions = torch.cat([r_pos.unsqueeze(-2), positions], dim=-2)\n\n return positions" }, { "identifier": "get_opt", "path": "utils/get_opt.py", "snippet": "def get_opt(opt_path, device, **kwargs):\n opt = Namespace()\n opt_dict = vars(opt)\n\n skip = ('-------------- End ----------------',\n '------------ Options -------------',\n '\\n')\n print('Reading', opt_path)\n with open(opt_path, 'r') as f:\n for line in f:\n if line.strip() not in skip:\n # print(line.strip())\n key, value = line.strip('\\n').split(': ')\n if value in ('True', 'False'):\n opt_dict[key] = (value == 'True')\n # print(key, value)\n elif is_float(value):\n opt_dict[key] = float(value)\n elif is_number(value):\n opt_dict[key] = int(value)\n else:\n opt_dict[key] = str(value)\n\n # print(opt)\n opt_dict['which_epoch'] = 'finest'\n opt.save_root = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.name)\n opt.model_dir = pjoin(opt.save_root, 'model')\n opt.meta_dir = pjoin(opt.save_root, 'meta')\n\n if opt.dataset_name == 't2m':\n opt.data_root = './dataset/HumanML3D/'\n opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs')\n opt.text_dir = pjoin(opt.data_root, 'texts')\n opt.joints_num = 22\n opt.dim_pose = 263\n opt.max_motion_length = 196\n opt.max_motion_frame = 196\n opt.max_motion_token = 55\n elif opt.dataset_name == 'kit':\n opt.data_root = './dataset/KIT-ML/'\n opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs')\n opt.text_dir = pjoin(opt.data_root, 'texts')\n opt.joints_num = 21\n opt.dim_pose = 251\n opt.max_motion_length = 196\n opt.max_motion_frame = 196\n opt.max_motion_token = 55\n else:\n raise 
KeyError('Dataset not recognized')\n if not hasattr(opt, 'unit_length'):\n opt.unit_length = 4\n opt.dim_word = 300\n opt.num_classes = 200 // opt.unit_length\n opt.dim_pos_ohot = len(POS_enumerator)\n opt.is_train = False\n opt.is_continue = False\n opt.device = device\n\n opt_dict.update(kwargs) # Overwrite with kwargs params\n\n return opt" }, { "identifier": "fixseed", "path": "utils/fixseed.py", "snippet": "def fixseed(seed):\n torch.backends.cudnn.benchmark = False\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)" }, { "identifier": "t2m_kinematic_chain", "path": "utils/paramUtil.py", "snippet": "" }, { "identifier": "Text2MotionDataset", "path": "data/t2m_dataset.py", "snippet": "class Text2MotionDataset(data.Dataset):\n def __init__(self, opt, mean, std, split_file):\n self.opt = opt\n self.max_length = 20\n self.pointer = 0\n self.max_motion_length = opt.max_motion_length\n min_motion_len = 40 if self.opt.dataset_name =='t2m' else 24\n\n data_dict = {}\n id_list = []\n with cs.open(split_file, 'r') as f:\n for line in f.readlines():\n id_list.append(line.strip())\n # id_list = id_list[:250]\n\n new_name_list = []\n length_list = []\n for name in tqdm(id_list):\n try:\n motion = np.load(pjoin(opt.motion_dir, name + '.npy'))\n if (len(motion)) < min_motion_len or (len(motion) >= 200):\n continue\n text_data = []\n flag = False\n with cs.open(pjoin(opt.text_dir, name + '.txt')) as f:\n for line in f.readlines():\n text_dict = {}\n line_split = line.strip().split('#')\n # print(line)\n caption = line_split[0]\n tokens = line_split[1].split(' ')\n f_tag = float(line_split[2])\n to_tag = float(line_split[3])\n f_tag = 0.0 if np.isnan(f_tag) else f_tag\n to_tag = 0.0 if np.isnan(to_tag) else to_tag\n\n text_dict['caption'] = caption\n text_dict['tokens'] = tokens\n if f_tag == 0.0 and to_tag == 0.0:\n flag = True\n text_data.append(text_dict)\n else:\n try:\n n_motion = motion[int(f_tag*20) : int(to_tag*20)]\n if (len(n_motion)) < min_motion_len or (len(n_motion) >= 200):\n continue\n new_name = random.choice('ABCDEFGHIJKLMNOPQRSTUVW') + '_' + name\n while new_name in data_dict:\n new_name = random.choice('ABCDEFGHIJKLMNOPQRSTUVW') + '_' + name\n data_dict[new_name] = {'motion': n_motion,\n 'length': len(n_motion),\n 'text':[text_dict]}\n new_name_list.append(new_name)\n length_list.append(len(n_motion))\n except:\n print(line_split)\n print(line_split[2], line_split[3], f_tag, to_tag, name)\n # break\n\n if flag:\n data_dict[name] = {'motion': motion,\n 'length': len(motion),\n 'text': text_data}\n new_name_list.append(name)\n length_list.append(len(motion))\n except Exception as e:\n # print(e)\n pass\n\n # name_list, length_list = zip(*sorted(zip(new_name_list, length_list), key=lambda x: x[1]))\n name_list, length_list = new_name_list, length_list\n\n self.mean = mean\n self.std = std\n self.length_arr = np.array(length_list)\n self.data_dict = data_dict\n self.name_list = name_list\n\n def inv_transform(self, data):\n return data * self.std + self.mean\n\n def __len__(self):\n return len(self.data_dict) - self.pointer\n\n def __getitem__(self, item):\n idx = self.pointer + item\n data = self.data_dict[self.name_list[idx]]\n motion, m_length, text_list = data['motion'], data['length'], data['text']\n # Randomly select a caption\n text_data = random.choice(text_list)\n caption, tokens = text_data['caption'], text_data['tokens']\n\n if self.opt.unit_length < 10:\n coin2 = np.random.choice(['single', 'single', 'double'])\n else:\n coin2 = 'single'\n\n if coin2 == 
'double':\n m_length = (m_length // self.opt.unit_length - 1) * self.opt.unit_length\n elif coin2 == 'single':\n m_length = (m_length // self.opt.unit_length) * self.opt.unit_length\n idx = random.randint(0, len(motion) - m_length)\n motion = motion[idx:idx+m_length]\n\n \"Z Normalization\"\n motion = (motion - self.mean) / self.std\n\n if m_length < self.max_motion_length:\n motion = np.concatenate([motion,\n np.zeros((self.max_motion_length - m_length, motion.shape[1]))\n ], axis=0)\n # print(word_embeddings.shape, motion.shape)\n # print(tokens)\n return caption, motion, m_length\n\n def reset_min_len(self, length):\n assert length <= self.max_motion_length\n self.pointer = np.searchsorted(self.length_arr, length)\n print(\"Pointer Pointing at %d\" % self.pointer)" }, { "identifier": "get_dataset_motion_loader", "path": "motion_loaders/dataset_motion_loader.py", "snippet": "def get_dataset_motion_loader(opt_path, batch_size, fname, device):\n opt = get_opt(opt_path, device)\n\n # Configurations of T2M dataset and KIT dataset is almost the same\n if opt.dataset_name == 't2m' or opt.dataset_name == 'kit':\n print('Loading dataset %s ...' % opt.dataset_name)\n\n mean = np.load(pjoin(opt.meta_dir, 'mean.npy'))\n std = np.load(pjoin(opt.meta_dir, 'std.npy'))\n\n w_vectorizer = WordVectorizer('./glove', 'our_vab')\n split_file = pjoin(opt.data_root, '%s.txt'%fname)\n dataset = Text2MotionDatasetEval(opt, mean, std, split_file, w_vectorizer)\n dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=4, drop_last=True,\n collate_fn=collate_fn, shuffle=True)\n else:\n raise KeyError('Dataset not Recognized !!')\n\n print('Ground Truth Dataset Loading Completed!!!')\n return dataloader, dataset" }, { "identifier": "EvaluatorModelWrapper", "path": "models/t2m_eval_wrapper.py", "snippet": "class EvaluatorModelWrapper(object):\n\n def __init__(self, opt):\n\n if opt.dataset_name == 't2m':\n opt.dim_pose = 263\n elif opt.dataset_name == 'kit':\n opt.dim_pose = 251\n else:\n raise KeyError('Dataset not Recognized!!!')\n\n opt.dim_word = 300\n opt.max_motion_length = 196\n opt.dim_pos_ohot = len(POS_enumerator)\n opt.dim_motion_hidden = 1024\n opt.max_text_len = 20\n opt.dim_text_hidden = 512\n opt.dim_coemb_hidden = 512\n\n # print(opt)\n\n self.text_encoder, self.motion_encoder, self.movement_encoder = build_models(opt)\n self.opt = opt\n self.device = opt.device\n\n self.text_encoder.to(opt.device)\n self.motion_encoder.to(opt.device)\n self.movement_encoder.to(opt.device)\n\n self.text_encoder.eval()\n self.motion_encoder.eval()\n self.movement_encoder.eval()\n\n # Please note that the results does not follow the order of inputs\n def get_co_embeddings(self, word_embs, pos_ohot, cap_lens, motions, m_lens):\n with torch.no_grad():\n word_embs = word_embs.detach().to(self.device).float()\n pos_ohot = pos_ohot.detach().to(self.device).float()\n motions = motions.detach().to(self.device).float()\n\n align_idx = np.argsort(m_lens.data.tolist())[::-1].copy()\n motions = motions[align_idx]\n m_lens = m_lens[align_idx]\n\n '''Movement Encoding'''\n movements = self.movement_encoder(motions[..., :-4]).detach()\n m_lens = m_lens // self.opt.unit_length\n motion_embedding = self.motion_encoder(movements, m_lens)\n\n '''Text Encoding'''\n text_embedding = self.text_encoder(word_embs, pos_ohot, cap_lens)\n text_embedding = text_embedding[align_idx]\n return text_embedding, motion_embedding\n\n # Please note that the results does not follow the order of inputs\n def get_motion_embeddings(self, 
motions, m_lens):\n with torch.no_grad():\n motions = motions.detach().to(self.device).float()\n\n align_idx = np.argsort(m_lens.data.tolist())[::-1].copy()\n motions = motions[align_idx]\n m_lens = m_lens[align_idx]\n\n '''Movement Encoding'''\n movements = self.movement_encoder(motions[..., :-4]).detach()\n m_lens = m_lens // self.opt.unit_length\n motion_embedding = self.motion_encoder(movements, m_lens)\n return motion_embedding" } ]
import os import torch import numpy as np from torch.utils.data import DataLoader from os.path import join as pjoin from models.mask_transformer.transformer import ResidualTransformer from models.mask_transformer.transformer_trainer import ResidualTransformerTrainer from models.vq.model import RVQVAE from options.train_option import TrainT2MOptions from utils.plot_script import plot_3d_motion from utils.motion_process import recover_from_ric from utils.get_opt import get_opt from utils.fixseed import fixseed from utils.paramUtil import t2m_kinematic_chain, kit_kinematic_chain from data.t2m_dataset import Text2MotionDataset from motion_loaders.dataset_motion_loader import get_dataset_motion_loader from models.t2m_eval_wrapper import EvaluatorModelWrapper
13,955
vq_opt.vq_act, vq_opt.vq_norm) ckpt = torch.load(pjoin(vq_opt.checkpoints_dir, vq_opt.dataset_name, vq_opt.name, 'model', 'net_best_fid.tar'), map_location=opt.device) model_key = 'vq_model' if 'vq_model' in ckpt else 'net' vq_model.load_state_dict(ckpt[model_key]) print(f'Loading VQ Model {opt.vq_name}') vq_model.to(opt.device) return vq_model, vq_opt if __name__ == '__main__': parser = TrainT2MOptions() opt = parser.parse() fixseed(opt.seed) opt.device = torch.device("cpu" if opt.gpu_id == -1 else "cuda:" + str(opt.gpu_id)) torch.autograd.set_detect_anomaly(True) opt.save_root = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.name) opt.model_dir = pjoin(opt.save_root, 'model') # opt.meta_dir = pjoin(opt.save_root, 'meta') opt.eval_dir = pjoin(opt.save_root, 'animation') opt.log_dir = pjoin('./log/res/', opt.dataset_name, opt.name) os.makedirs(opt.model_dir, exist_ok=True) # os.makedirs(opt.meta_dir, exist_ok=True) os.makedirs(opt.eval_dir, exist_ok=True) os.makedirs(opt.log_dir, exist_ok=True) if opt.dataset_name == 't2m': opt.data_root = './dataset/HumanML3D' opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs') opt.joints_num = 22 opt.max_motion_len = 55 dim_pose = 263 radius = 4 fps = 20 kinematic_chain = t2m_kinematic_chain dataset_opt_path = './checkpoints/t2m/Comp_v6_KLD005/opt.txt' elif opt.dataset_name == 'kit': #TODO opt.data_root = './dataset/KIT-ML' opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs') opt.joints_num = 21 radius = 240 * 8 fps = 12.5 dim_pose = 251 opt.max_motion_len = 55 kinematic_chain = kit_kinematic_chain dataset_opt_path = './checkpoints/kit/Comp_v6_KLD005/opt.txt' else: raise KeyError('Dataset Does Not Exist') opt.text_dir = pjoin(opt.data_root, 'texts') vq_model, vq_opt = load_vq_model() clip_version = 'ViT-B/32' opt.num_tokens = vq_opt.nb_code opt.num_quantizers = vq_opt.num_quantizers # if opt.is_v2: res_transformer = ResidualTransformer(code_dim=vq_opt.code_dim, cond_mode='text', latent_dim=opt.latent_dim, ff_size=opt.ff_size, num_layers=opt.n_layers, num_heads=opt.n_heads, dropout=opt.dropout, clip_dim=512, shared_codebook=vq_opt.shared_codebook, cond_drop_prob=opt.cond_drop_prob, # codebook=vq_model.quantizer.codebooks[0] if opt.fix_token_emb else None, share_weight=opt.share_weight, clip_version=clip_version, opt=opt) # else: # res_transformer = ResidualTransformer(code_dim=vq_opt.code_dim, # cond_mode='text', # latent_dim=opt.latent_dim, # ff_size=opt.ff_size, # num_layers=opt.n_layers, # num_heads=opt.n_heads, # dropout=opt.dropout, # clip_dim=512, # shared_codebook=vq_opt.shared_codebook, # cond_drop_prob=opt.cond_drop_prob, # # codebook=vq_model.quantizer.codebooks[0] if opt.fix_token_emb else None, # clip_version=clip_version, # opt=opt) all_params = 0 pc_transformer = sum(param.numel() for param in res_transformer.parameters_wo_clip()) print(res_transformer) # print("Total parameters of t2m_transformer net: {:.2f}M".format(pc_transformer / 1000_000)) all_params += pc_transformer print('Total parameters of all models: {:.2f}M'.format(all_params / 1000_000)) mean = np.load(pjoin(opt.checkpoints_dir, opt.dataset_name, opt.vq_name, 'meta', 'mean.npy')) std = np.load(pjoin(opt.checkpoints_dir, opt.dataset_name, opt.vq_name, 'meta', 'std.npy')) train_split_file = pjoin(opt.data_root, 'train.txt') val_split_file = pjoin(opt.data_root, 'val.txt') train_dataset = Text2MotionDataset(opt, mean, std, train_split_file) val_dataset = Text2MotionDataset(opt, mean, std, val_split_file) train_loader = DataLoader(train_dataset, batch_size=opt.batch_size, 
num_workers=4, shuffle=True, drop_last=True) val_loader = DataLoader(val_dataset, batch_size=opt.batch_size, num_workers=4, shuffle=True, drop_last=True) eval_val_loader, _ = get_dataset_motion_loader(dataset_opt_path, 32, 'val', device=opt.device) wrapper_opt = get_opt(dataset_opt_path, torch.device('cuda')) eval_wrapper = EvaluatorModelWrapper(wrapper_opt)
def plot_t2m(data, save_dir, captions, m_lengths): data = train_dataset.inv_transform(data) # print(ep_curves.shape) for i, (caption, joint_data) in enumerate(zip(captions, data)): joint_data = joint_data[:m_lengths[i]] joint = recover_from_ric(torch.from_numpy(joint_data).float(), opt.joints_num).numpy() save_path = pjoin(save_dir, '%02d.mp4'%i) # print(joint.shape) plot_3d_motion(save_path, kinematic_chain, joint, title=caption, fps=20) def load_vq_model(): opt_path = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.vq_name, 'opt.txt') vq_opt = get_opt(opt_path, opt.device) vq_model = RVQVAE(vq_opt, dim_pose, vq_opt.nb_code, vq_opt.code_dim, vq_opt.output_emb_width, vq_opt.down_t, vq_opt.stride_t, vq_opt.width, vq_opt.depth, vq_opt.dilation_growth_rate, vq_opt.vq_act, vq_opt.vq_norm) ckpt = torch.load(pjoin(vq_opt.checkpoints_dir, vq_opt.dataset_name, vq_opt.name, 'model', 'net_best_fid.tar'), map_location=opt.device) model_key = 'vq_model' if 'vq_model' in ckpt else 'net' vq_model.load_state_dict(ckpt[model_key]) print(f'Loading VQ Model {opt.vq_name}') vq_model.to(opt.device) return vq_model, vq_opt if __name__ == '__main__': parser = TrainT2MOptions() opt = parser.parse() fixseed(opt.seed) opt.device = torch.device("cpu" if opt.gpu_id == -1 else "cuda:" + str(opt.gpu_id)) torch.autograd.set_detect_anomaly(True) opt.save_root = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.name) opt.model_dir = pjoin(opt.save_root, 'model') # opt.meta_dir = pjoin(opt.save_root, 'meta') opt.eval_dir = pjoin(opt.save_root, 'animation') opt.log_dir = pjoin('./log/res/', opt.dataset_name, opt.name) os.makedirs(opt.model_dir, exist_ok=True) # os.makedirs(opt.meta_dir, exist_ok=True) os.makedirs(opt.eval_dir, exist_ok=True) os.makedirs(opt.log_dir, exist_ok=True) if opt.dataset_name == 't2m': opt.data_root = './dataset/HumanML3D' opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs') opt.joints_num = 22 opt.max_motion_len = 55 dim_pose = 263 radius = 4 fps = 20 kinematic_chain = t2m_kinematic_chain dataset_opt_path = './checkpoints/t2m/Comp_v6_KLD005/opt.txt' elif opt.dataset_name == 'kit': #TODO opt.data_root = './dataset/KIT-ML' opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs') opt.joints_num = 21 radius = 240 * 8 fps = 12.5 dim_pose = 251 opt.max_motion_len = 55 kinematic_chain = kit_kinematic_chain dataset_opt_path = './checkpoints/kit/Comp_v6_KLD005/opt.txt' else: raise KeyError('Dataset Does Not Exist') opt.text_dir = pjoin(opt.data_root, 'texts') vq_model, vq_opt = load_vq_model() clip_version = 'ViT-B/32' opt.num_tokens = vq_opt.nb_code opt.num_quantizers = vq_opt.num_quantizers # if opt.is_v2: res_transformer = ResidualTransformer(code_dim=vq_opt.code_dim, cond_mode='text', latent_dim=opt.latent_dim, ff_size=opt.ff_size, num_layers=opt.n_layers, num_heads=opt.n_heads, dropout=opt.dropout, clip_dim=512, shared_codebook=vq_opt.shared_codebook, cond_drop_prob=opt.cond_drop_prob, # codebook=vq_model.quantizer.codebooks[0] if opt.fix_token_emb else None, share_weight=opt.share_weight, clip_version=clip_version, opt=opt) # else: # res_transformer = ResidualTransformer(code_dim=vq_opt.code_dim, # cond_mode='text', # latent_dim=opt.latent_dim, # ff_size=opt.ff_size, # num_layers=opt.n_layers, # num_heads=opt.n_heads, # dropout=opt.dropout, # clip_dim=512, # shared_codebook=vq_opt.shared_codebook, # cond_drop_prob=opt.cond_drop_prob, # # codebook=vq_model.quantizer.codebooks[0] if opt.fix_token_emb else None, # clip_version=clip_version, # opt=opt) all_params = 0 pc_transformer = sum(param.numel() 
for param in res_transformer.parameters_wo_clip()) print(res_transformer) # print("Total parameters of t2m_transformer net: {:.2f}M".format(pc_transformer / 1000_000)) all_params += pc_transformer print('Total parameters of all models: {:.2f}M'.format(all_params / 1000_000)) mean = np.load(pjoin(opt.checkpoints_dir, opt.dataset_name, opt.vq_name, 'meta', 'mean.npy')) std = np.load(pjoin(opt.checkpoints_dir, opt.dataset_name, opt.vq_name, 'meta', 'std.npy')) train_split_file = pjoin(opt.data_root, 'train.txt') val_split_file = pjoin(opt.data_root, 'val.txt') train_dataset = Text2MotionDataset(opt, mean, std, train_split_file) val_dataset = Text2MotionDataset(opt, mean, std, val_split_file) train_loader = DataLoader(train_dataset, batch_size=opt.batch_size, num_workers=4, shuffle=True, drop_last=True) val_loader = DataLoader(val_dataset, batch_size=opt.batch_size, num_workers=4, shuffle=True, drop_last=True) eval_val_loader, _ = get_dataset_motion_loader(dataset_opt_path, 32, 'val', device=opt.device) wrapper_opt = get_opt(dataset_opt_path, torch.device('cuda')) eval_wrapper = EvaluatorModelWrapper(wrapper_opt)
trainer = ResidualTransformerTrainer(opt, res_transformer, vq_model)
1
2023-11-29 19:21:27+00:00
16k
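A minimal orientation sketch for the record that ends here, assuming the objects built in its all_code field (opt, res_transformer, vq_model, train_loader, val_loader, eval_val_loader, eval_wrapper, plot_t2m): the gold next_line instantiates ResidualTransformerTrainer, and the train(train_loader, val_loader, eval_val_loader, eval_wrapper, plot_eval) signature shown in that context snippet suggests the script plausibly continues as below. This is an inference for illustration, not verbatim repository code.

# Hypothetical continuation after the gold next_line; relies on names defined
# in the record's all_code and on the ResidualTransformerTrainer snippet above.
trainer = ResidualTransformerTrainer(opt, res_transformer, vq_model)
# Per the snippet, train() handles LR warm-up, logging, checkpointing and
# periodic evaluation, using plot_t2m (defined in all_code) to animate samples.
trainer.train(train_loader, val_loader, eval_val_loader,
              eval_wrapper=eval_wrapper, plot_eval=plot_t2m)

Note also that this record's gold_snippet_index of 1 would, under zero-based indexing of the context list, point at the ResidualTransformerTrainer entry, which matches the class constructed in next_line.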
dvlab-research/LLMGA
llmga/serve/cli-sdxl.py
[ { "identifier": "IMAGE_TOKEN_INDEX", "path": "llmga/llava/constants.py", "snippet": "IMAGE_TOKEN_INDEX = -200" }, { "identifier": "DEFAULT_IMAGE_TOKEN", "path": "llmga/llava/constants.py", "snippet": "DEFAULT_IMAGE_TOKEN = \"<image>\"" }, { "identifier": "DEFAULT_IM_START_TOKEN", "path": "llmga/llava/constants.py", "snippet": "DEFAULT_IM_START_TOKEN = \"<im_start>\"" }, { "identifier": "DEFAULT_IM_END_TOKEN", "path": "llmga/llava/constants.py", "snippet": "DEFAULT_IM_END_TOKEN = \"<im_end>\"" }, { "identifier": "conv_templates", "path": "llmga/llava/conversation.py", "snippet": "class SeparatorStyle(Enum):\nclass Conversation:\n SINGLE = auto()\n TWO = auto()\n MPT = auto()\n PLAIN = auto()\n LLAMA_2 = auto()\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n def get_prompt(self):\n def append_message(self, role, message):\n def get_images(self, return_pil=False):\n def expand2square(pil_img, background_color=(122, 116, 104)):\n def to_gradio_chatbot(self):\n def copy(self):\n def dict(self):" }, { "identifier": "load_pretrained_model", "path": "llmga/llava/model/builder.py", "snippet": "def load_pretrained_model(model_path, model_base, model_name, load_8bit=False, load_4bit=False, device_map=\"auto\"):\n kwargs = {\"device_map\": device_map}\n\n if load_8bit:\n kwargs['load_in_8bit'] = True\n elif load_4bit:\n kwargs['load_in_4bit'] = True\n kwargs['quantization_config'] = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_compute_dtype=torch.float16,\n bnb_4bit_use_double_quant=True,\n bnb_4bit_quant_type='nf4'\n )\n else:\n kwargs['torch_dtype'] = torch.float16\n\n if 'llmga' in model_name.lower():\n # Load LLaVA model\n if 'lora' in model_name.lower() and model_base is not None:\n lora_cfg_pretrained = AutoConfig.from_pretrained(model_path)\n tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)\n print('Loading LLMGA from base model...')\n model = LlavaLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs)\n token_num, tokem_dim = model.lm_head.out_features, model.lm_head.in_features\n if model.lm_head.weight.shape[0] != token_num:\n model.lm_head.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))\n model.model.embed_tokens.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))\n\n print('Loading additional LLMGA weights...')\n if os.path.exists(os.path.join(model_path, 'non_lora_trainables.bin')):\n non_lora_trainables = torch.load(os.path.join(model_path, 'non_lora_trainables.bin'), map_location='cpu')\n else:\n # this is probably from HF Hub\n from huggingface_hub import hf_hub_download\n def load_from_hf(repo_id, filename, subfolder=None):\n cache_file = hf_hub_download(\n repo_id=repo_id,\n filename=filename,\n subfolder=subfolder)\n return torch.load(cache_file, map_location='cpu')\n non_lora_trainables = load_from_hf(model_path, 'non_lora_trainables.bin')\n non_lora_trainables = {(k[11:] if k.startswith('base_model.') else k): v for k, v in non_lora_trainables.items()}\n if any(k.startswith('model.model.') for k in non_lora_trainables):\n non_lora_trainables = {(k[6:] if k.startswith('model.') else k): v for k, v in non_lora_trainables.items()}\n model.load_state_dict(non_lora_trainables, strict=False)\n\n from peft import PeftModel\n print('Loading LoRA weights...')\n 
model = PeftModel.from_pretrained(model, model_path)\n print('Merging LoRA weights...')\n model = model.merge_and_unload()\n print('Model is loaded...')\n elif model_base is not None:\n # this may be mm projector only\n print('Loading LLMGA from base model...')\n if 'mpt' in model_name.lower():\n if not os.path.isfile(os.path.join(model_path, 'configuration_mpt.py')):\n shutil.copyfile(os.path.join(model_base, 'configuration_mpt.py'), os.path.join(model_path, 'configuration_mpt.py'))\n tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=True)\n cfg_pretrained = AutoConfig.from_pretrained(model_path, trust_remote_code=True)\n model = LlavaMPTForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs)\n else:\n tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)\n cfg_pretrained = AutoConfig.from_pretrained(model_path)\n model = LlavaLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs)\n\n mm_projector_weights = torch.load(os.path.join(model_path, 'mm_projector.bin'), map_location='cpu')\n mm_projector_weights = {k: v.to(torch.float16) for k, v in mm_projector_weights.items()}\n model.load_state_dict(mm_projector_weights, strict=False)\n else:\n if 'mpt' in model_name.lower():\n tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)\n model = LlavaMPTForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)\n else:\n tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)\n model = LlavaLlamaForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)\n else:\n # Load language model\n if model_base is not None:\n # PEFT model\n from peft import PeftModel\n tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)\n model = AutoModelForCausalLM.from_pretrained(model_base, torch_dtype=torch.float16, low_cpu_mem_usage=True, device_map=\"auto\")\n print(f\"Loading LoRA weights from {model_path}\")\n model = PeftModel.from_pretrained(model, model_path)\n print(f\"Merging weights\")\n model = model.merge_and_unload()\n print('Convert to FP16...')\n model.to(torch.float16)\n else:\n use_fast = False\n if 'mpt' in model_name.lower():\n tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)\n model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, trust_remote_code=True, **kwargs)\n else:\n tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)\n model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)\n\n image_processor = None\n\n if 'llmga' in model_name.lower():\n mm_use_im_start_end = getattr(model.config, \"mm_use_im_start_end\", False)\n mm_use_im_patch_token = getattr(model.config, \"mm_use_im_patch_token\", True)\n if mm_use_im_patch_token:\n tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)\n if mm_use_im_start_end:\n tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)\n model.resize_token_embeddings(len(tokenizer))\n\n vision_tower = model.get_vision_tower()\n if not vision_tower.is_loaded:\n vision_tower.load_model()\n vision_tower.to(device='cuda', dtype=torch.float16)\n image_processor = vision_tower.image_processor\n\n if hasattr(model.config, \"max_sequence_length\"):\n context_len = model.config.max_sequence_length\n else:\n context_len = 2048\n\n return tokenizer, model, image_processor, context_len" }, { "identifier": "disable_torch_init", "path": 
"llmga/llava/utils.py", "snippet": "def disable_torch_init():\n \"\"\"\n Disable the redundant torch default initialization to accelerate model creation.\n \"\"\"\n import torch\n setattr(torch.nn.Linear, \"reset_parameters\", lambda self: None)\n setattr(torch.nn.LayerNorm, \"reset_parameters\", lambda self: None)" }, { "identifier": "tokenizer_image_token", "path": "llmga/llava/mm_utils.py", "snippet": "def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):\n prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split('<image>')]\n def insert_separator(X, sep):\n return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1]\n\n input_ids = []\n offset = 0\n if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:\n offset = 1\n input_ids.append(prompt_chunks[0][0])\n\n for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):\n input_ids.extend(x[offset:])\n\n if return_tensors is not None:\n if return_tensors == 'pt':\n return torch.tensor(input_ids, dtype=torch.long)\n raise ValueError(f'Unsupported tensor type: {return_tensors}')\n return input_ids" }, { "identifier": "get_model_name_from_path", "path": "llmga/llava/mm_utils.py", "snippet": "def get_model_name_from_path(model_path):\n model_path = model_path.strip(\"/\")\n model_paths = model_path.split(\"/\")\n if model_paths[-1].startswith('checkpoint-'):\n return model_paths[-2] + \"_\" + model_paths[-1]\n else:\n return model_paths[-1]" }, { "identifier": "KeywordsStoppingCriteria", "path": "llmga/llava/mm_utils.py", "snippet": "class KeywordsStoppingCriteria(StoppingCriteria):\n def __init__(self, keywords, tokenizer, input_ids):\n self.keywords = keywords\n self.keyword_ids = []\n for keyword in keywords:\n cur_keyword_ids = tokenizer(keyword).input_ids\n if len(cur_keyword_ids) > 1 and cur_keyword_ids[0] == tokenizer.bos_token_id:\n cur_keyword_ids = cur_keyword_ids[1:]\n self.keyword_ids.append(torch.tensor(cur_keyword_ids))\n self.tokenizer = tokenizer\n self.start_len = input_ids.shape[1]\n\n def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:\n assert output_ids.shape[0] == 1, \"Only support batch size 1 (yet)\" # TODO\n offset = min(output_ids.shape[1] - self.start_len, 3)\n self.keyword_ids = [keyword_id.to(output_ids.device) for keyword_id in self.keyword_ids]\n for keyword_id in self.keyword_ids:\n if output_ids[0, -keyword_id.shape[0]:] == keyword_id:\n return True\n outputs = self.tokenizer.batch_decode(output_ids[:, -offset:], skip_special_tokens=True)[0]\n for keyword in self.keywords:\n if keyword in outputs:\n return True\n return False" }, { "identifier": "StableDiffusionXLPipeline", "path": "llmga/diffusers/pipeline_stable_diffusion_xl_lpw.py", "snippet": "class StableDiffusionXLPipeline(\n DiffusionPipeline, FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin\n):\n r\"\"\"\n Pipeline for text-to-image generation using Stable Diffusion XL.\n\n This model inherits from [`DiffusionPipeline`]. 
Check the superclass documentation for the generic methods the\n library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)\n\n In addition the pipeline inherits the following loading methods:\n - *LoRA*: [`loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`]\n - *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]\n\n as well as the following saving methods:\n - *LoRA*: [`loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`]\n\n Args:\n vae ([`AutoencoderKL`]):\n Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.\n text_encoder ([`CLIPTextModel`]):\n Frozen text-encoder. Stable Diffusion XL uses the text portion of\n [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically\n the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.\n text_encoder_2 ([` CLIPTextModelWithProjection`]):\n Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of\n [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),\n specifically the\n [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)\n variant.\n tokenizer (`CLIPTokenizer`):\n Tokenizer of class\n [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).\n tokenizer_2 (`CLIPTokenizer`):\n Second Tokenizer of class\n [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).\n unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.\n scheduler ([`SchedulerMixin`]):\n A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of\n [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].\n force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `\"True\"`):\n Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of\n `stabilityai/stable-diffusion-xl-base-1-0`.\n add_watermarker (`bool`, *optional*):\n Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to\n watermark output images. 
If not defined, it will default to True if the package is installed, otherwise no\n watermarker will be used.\n \"\"\"\n model_cpu_offload_seq = \"text_encoder->text_encoder_2->unet->vae\"\n\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n text_encoder_2: CLIPTextModelWithProjection,\n tokenizer: CLIPTokenizer,\n tokenizer_2: CLIPTokenizer,\n unet: UNet2DConditionModel,\n scheduler: KarrasDiffusionSchedulers,\n force_zeros_for_empty_prompt: bool = True,\n add_watermarker: Optional[bool] = None,\n ):\n super().__init__()\n\n self.register_modules(\n vae=vae,\n text_encoder=text_encoder,\n text_encoder_2=text_encoder_2,\n tokenizer=tokenizer,\n tokenizer_2=tokenizer_2,\n unet=unet,\n scheduler=scheduler,\n )\n self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)\n self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)\n self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)\n self.default_sample_size = self.unet.config.sample_size\n\n add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()\n\n if add_watermarker:\n self.watermark = StableDiffusionXLWatermarker()\n else:\n self.watermark = None\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing\n def enable_vae_slicing(self):\n r\"\"\"\n Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to\n compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.\n \"\"\"\n self.vae.enable_slicing()\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing\n def disable_vae_slicing(self):\n r\"\"\"\n Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to\n computing decoding in one step.\n \"\"\"\n self.vae.disable_slicing()\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling\n def enable_vae_tiling(self):\n r\"\"\"\n Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to\n compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow\n processing larger images.\n \"\"\"\n self.vae.enable_tiling()\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling\n def disable_vae_tiling(self):\n r\"\"\"\n Disable tiled VAE decoding. 
If `enable_vae_tiling` was previously enabled, this method will go back to\n computing decoding in one step.\n \"\"\"\n self.vae.disable_tiling()\n\n def encode_prompt(\n self,\n prompt: str,\n prompt_2: Optional[str] = None,\n device: Optional[torch.device] = None,\n num_images_per_prompt: int = 1,\n do_classifier_free_guidance: bool = True,\n negative_prompt: Optional[str] = None,\n negative_prompt_2: Optional[str] = None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n pooled_prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,\n lora_scale: Optional[float] = None,\n clip_skip: Optional[int] = None,\n ):\n r\"\"\"\n Encodes the prompt into text encoder hidden states.\n\n Args:\n prompt (`str` or `List[str]`, *optional*):\n prompt to be encoded\n prompt_2 (`str` or `List[str]`, *optional*):\n The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is\n used in both text-encoders\n device: (`torch.device`):\n torch device\n num_images_per_prompt (`int`):\n number of images that should be generated per prompt\n do_classifier_free_guidance (`bool`):\n whether to use classifier free guidance or not\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts not to guide the image generation. If not defined, one has to pass\n `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is\n less than `1`).\n negative_prompt_2 (`str` or `List[str]`, *optional*):\n The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and\n `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders\n prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not\n provided, text embeddings will be generated from `prompt` input argument.\n negative_prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt\n weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input\n argument.\n pooled_prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.\n If not provided, pooled text embeddings will be generated from `prompt` input argument.\n negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt\n weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`\n input argument.\n lora_scale (`float`, *optional*):\n A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.\n clip_skip (`int`, *optional*):\n Number of layers to be skipped from CLIP while computing the prompt embeddings. 
A value of 1 means that\n the output of the pre-final layer will be used for computing the prompt embeddings.\n \"\"\"\n device = device or self._execution_device\n\n # set lora scale so that monkey patched LoRA\n # function of text encoder can correctly access it\n if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):\n self._lora_scale = lora_scale\n\n # dynamically adjust the LoRA scale\n if not USE_PEFT_BACKEND:\n adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)\n adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)\n else:\n scale_lora_layers(self.text_encoder, lora_scale)\n scale_lora_layers(self.text_encoder_2, lora_scale)\n\n prompt = [prompt] if isinstance(prompt, str) else prompt\n\n if prompt is not None:\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n # Define tokenizers and text encoders\n tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]\n text_encoders = (\n [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]\n )\n\n if prompt_embeds is None:\n prompt_2 = prompt_2 or prompt\n prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2\n\n # textual inversion: procecss multi-vector tokens if necessary\n prompt_embeds_list = []\n prompts = [prompt, prompt_2]\n fg=0\n for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):\n if isinstance(self, TextualInversionLoaderMixin):\n prompt = self.maybe_convert_prompt(prompt, tokenizer)\n\n text_input_ids = get_text_index(tokenizer,prompt)\n\n \n untruncated_ids = tokenizer(prompt, padding=\"longest\", return_tensors=\"pt\").input_ids\n\n if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(\n text_input_ids, untruncated_ids\n ):\n removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])\n logger.warning(\n \"The following part of your input was truncated because CLIP can only handle sequences up to\"\n f\" {tokenizer.model_max_length} tokens: {removed_text}\"\n )\n\n if fg==0:\n text_embeddings, hidden_states = get_unweighted_text_embeddings_SDXL1(text_encoder,text_input_ids.to(device),chunk_length=tokenizer.model_max_length,clip_skip=clip_skip)\n fg=1\n else:\n text_embeddings, hidden_states = get_unweighted_text_embeddings_SDXL2(text_encoder,text_input_ids.to(device),chunk_length=tokenizer.model_max_length,clip_skip=clip_skip)\n\n\n # We are only ALWAYS interested in the pooled output of the final text encoder\n pooled_prompt_embeds = text_embeddings\n\n\n prompt_embeds_list.append(hidden_states)\n\n prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)\n\n # get unconditional embeddings for classifier free guidance\n zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt\n if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:\n negative_prompt_embeds = torch.zeros_like(prompt_embeds)\n negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)\n elif do_classifier_free_guidance and negative_prompt_embeds is None:\n negative_prompt = negative_prompt or \"\"\n negative_prompt_2 = negative_prompt_2 or negative_prompt\n\n # normalize str to list\n negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt\n negative_prompt_2 = (\n batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2\n 
)\n\n uncond_tokens: List[str]\n if prompt is not None and type(prompt) is not type(negative_prompt):\n raise TypeError(\n f\"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=\"\n f\" {type(prompt)}.\"\n )\n elif batch_size != len(negative_prompt):\n raise ValueError(\n f\"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:\"\n f\" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches\"\n \" the batch size of `prompt`.\"\n )\n else:\n uncond_tokens = [negative_prompt, negative_prompt_2]\n\n negative_prompt_embeds_list = []\n fg=1\n for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):\n if isinstance(self, TextualInversionLoaderMixin):\n negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)\n\n max_length = prompt_embeds.shape[1]\n\n uncond_input = get_text_index(tokenizer,negative_prompt)\n\n if fg==0:\n negative_pooled_prompt_embeds, negative_prompt_embeds = get_unweighted_text_embeddings_SDXL1(text_encoder,uncond_input.to(device),chunk_length=tokenizer.model_max_length,clip_skip=clip_skip)\n fg=1\n else:\n negative_pooled_prompt_embeds, negative_prompt_embeds = get_unweighted_text_embeddings_SDXL2(text_encoder,uncond_input.to(device),chunk_length=tokenizer.model_max_length,clip_skip=clip_skip)\n\n negative_prompt_embeds_list.append(negative_prompt_embeds)\n\n negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)\n\n prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)\n bs_embed, seq_len, _ = prompt_embeds.shape\n # duplicate text embeddings for each generation per prompt, using mps friendly method\n prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)\n prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)\n\n if do_classifier_free_guidance:\n # duplicate unconditional embeddings for each generation per prompt, using mps friendly method\n seq_len = negative_prompt_embeds.shape[1]\n negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)\n negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)\n negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)\n\n pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(\n bs_embed * num_images_per_prompt, -1\n )\n if do_classifier_free_guidance:\n negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(\n bs_embed * num_images_per_prompt, -1\n )\n\n if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:\n # Retrieve the original scale by scaling back the LoRA layers\n unscale_lora_layers(self.text_encoder)\n unscale_lora_layers(self.text_encoder_2)\n\n return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs\n def prepare_extra_step_kwargs(self, generator, eta):\n # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature\n # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.\n # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502\n # and should be between [0, 1]\n\n accepts_eta = \"eta\" in 
set(inspect.signature(self.scheduler.step).parameters.keys())\n extra_step_kwargs = {}\n if accepts_eta:\n extra_step_kwargs[\"eta\"] = eta\n\n # check if the scheduler accepts generator\n accepts_generator = \"generator\" in set(inspect.signature(self.scheduler.step).parameters.keys())\n if accepts_generator:\n extra_step_kwargs[\"generator\"] = generator\n return extra_step_kwargs\n\n def check_inputs(\n self,\n prompt,\n prompt_2,\n height,\n width,\n callback_steps,\n negative_prompt=None,\n negative_prompt_2=None,\n prompt_embeds=None,\n negative_prompt_embeds=None,\n pooled_prompt_embeds=None,\n negative_pooled_prompt_embeds=None,\n ):\n if height % 8 != 0 or width % 8 != 0:\n raise ValueError(f\"`height` and `width` have to be divisible by 8 but are {height} and {width}.\")\n\n if (callback_steps is None) or (\n callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)\n ):\n raise ValueError(\n f\"`callback_steps` has to be a positive integer but is {callback_steps} of type\"\n f\" {type(callback_steps)}.\"\n )\n\n if prompt is not None and prompt_embeds is not None:\n raise ValueError(\n f\"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to\"\n \" only forward one of the two.\"\n )\n elif prompt_2 is not None and prompt_embeds is not None:\n raise ValueError(\n f\"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to\"\n \" only forward one of the two.\"\n )\n elif prompt is None and prompt_embeds is None:\n raise ValueError(\n \"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.\"\n )\n elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):\n raise ValueError(f\"`prompt` has to be of type `str` or `list` but is {type(prompt)}\")\n elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):\n raise ValueError(f\"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}\")\n\n if negative_prompt is not None and negative_prompt_embeds is not None:\n raise ValueError(\n f\"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:\"\n f\" {negative_prompt_embeds}. Please make sure to only forward one of the two.\"\n )\n elif negative_prompt_2 is not None and negative_prompt_embeds is not None:\n raise ValueError(\n f\"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:\"\n f\" {negative_prompt_embeds}. Please make sure to only forward one of the two.\"\n )\n\n if prompt_embeds is not None and negative_prompt_embeds is not None:\n if prompt_embeds.shape != negative_prompt_embeds.shape:\n raise ValueError(\n \"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but\"\n f\" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`\"\n f\" {negative_prompt_embeds.shape}.\"\n )\n\n if prompt_embeds is not None and pooled_prompt_embeds is None:\n raise ValueError(\n \"If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`.\"\n )\n\n if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:\n raise ValueError(\n \"If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. 
Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`.\"\n )\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents\n def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):\n shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)\n if isinstance(generator, list) and len(generator) != batch_size:\n raise ValueError(\n f\"You have passed a list of generators of length {len(generator)}, but requested an effective batch\"\n f\" size of {batch_size}. Make sure the batch size matches the length of the generators.\"\n )\n\n if latents is None:\n latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)\n else:\n latents = latents.to(device)\n\n # scale the initial noise by the standard deviation required by the scheduler\n latents = latents * self.scheduler.init_noise_sigma\n return latents\n\n def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype):\n add_time_ids = list(original_size + crops_coords_top_left + target_size)\n\n passed_add_embed_dim = (\n self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim\n )\n expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features\n\n if expected_add_embed_dim != passed_add_embed_dim:\n raise ValueError(\n f\"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`.\"\n )\n\n add_time_ids = torch.tensor([add_time_ids], dtype=dtype)\n return add_time_ids\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae\n def upcast_vae(self):\n dtype = self.vae.dtype\n self.vae.to(dtype=torch.float32)\n use_torch_2_0_or_xformers = isinstance(\n self.vae.decoder.mid_block.attentions[0].processor,\n (\n AttnProcessor2_0,\n XFormersAttnProcessor,\n LoRAXFormersAttnProcessor,\n LoRAAttnProcessor2_0,\n ),\n )\n # if xformers or torch_2_0 is used attention block does not need\n # to be in float32 which can save lots of memory\n if use_torch_2_0_or_xformers:\n self.vae.post_quant_conv.to(dtype)\n self.vae.decoder.conv_in.to(dtype)\n self.vae.decoder.mid_block.to(dtype)\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_freeu\n def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):\n r\"\"\"Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497.\n\n The suffixes after the scaling factors represent the stages where they are being applied.\n\n Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values\n that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.\n\n Args:\n s1 (`float`):\n Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to\n mitigate \"oversmoothing effect\" in the enhanced denoising process.\n s2 (`float`):\n Scaling factor for stage 2 to attenuate the contributions of the skip features. 
This is done to\n mitigate \"oversmoothing effect\" in the enhanced denoising process.\n b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.\n b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.\n \"\"\"\n if not hasattr(self, \"unet\"):\n raise ValueError(\"The pipeline must have `unet` for using FreeU.\")\n self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2)\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_freeu\n def disable_freeu(self):\n \"\"\"Disables the FreeU mechanism if enabled.\"\"\"\n self.unet.disable_freeu()\n\n @torch.no_grad()\n @replace_example_docstring(EXAMPLE_DOC_STRING)\n def __call__(\n self,\n prompt: Union[str, List[str]] = None,\n prompt_2: Optional[Union[str, List[str]]] = None,\n height: Optional[int] = None,\n width: Optional[int] = None,\n num_inference_steps: int = 50,\n denoising_end: Optional[float] = None,\n guidance_scale: float = 5.0,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n negative_prompt_2: Optional[Union[str, List[str]]] = None,\n num_images_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n pooled_prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n guidance_rescale: float = 0.0,\n original_size: Optional[Tuple[int, int]] = None,\n crops_coords_top_left: Tuple[int, int] = (0, 0),\n target_size: Optional[Tuple[int, int]] = None,\n negative_original_size: Optional[Tuple[int, int]] = None,\n negative_crops_coords_top_left: Tuple[int, int] = (0, 0),\n negative_target_size: Optional[Tuple[int, int]] = None,\n clip_skip: Optional[int] = None,\n ):\n r\"\"\"\n Function invoked when calling the pipeline for generation.\n\n Args:\n prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.\n instead.\n prompt_2 (`str` or `List[str]`, *optional*):\n The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is\n used in both text-encoders\n height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):\n The height in pixels of the generated image. This is set to 1024 by default for the best results.\n Anything below 512 pixels won't work well for\n [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)\n and checkpoints that are not specifically fine-tuned on low resolutions.\n width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):\n The width in pixels of the generated image. 
This is set to 1024 by default for the best results.\n Anything below 512 pixels won't work well for\n [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)\n and checkpoints that are not specifically fine-tuned on low resolutions.\n num_inference_steps (`int`, *optional*, defaults to 50):\n The number of denoising steps. More denoising steps usually lead to a higher quality image at the\n expense of slower inference.\n denoising_end (`float`, *optional*):\n When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be\n completed before it is intentionally prematurely terminated. As a result, the returned sample will\n still retain a substantial amount of noise as determined by the discrete timesteps selected by the\n scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a\n \"Mixture of Denoisers\" multi-pipeline setup, as elaborated in [**Refining the Image\n Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)\n guidance_scale (`float`, *optional*, defaults to 5.0):\n Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).\n `guidance_scale` is defined as `w` of equation 2. of [Imagen\n Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >\n 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,\n usually at the expense of lower image quality.\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts not to guide the image generation. If not defined, one has to pass\n `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is\n less than `1`).\n negative_prompt_2 (`str` or `List[str]`, *optional*):\n The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and\n `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders\n num_images_per_prompt (`int`, *optional*, defaults to 1):\n The number of images to generate per prompt.\n eta (`float`, *optional*, defaults to 0.0):\n Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to\n [`schedulers.DDIMScheduler`], will be ignored for others.\n generator (`torch.Generator` or `List[torch.Generator]`, *optional*):\n One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)\n to make generation deterministic.\n latents (`torch.FloatTensor`, *optional*):\n Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image\n generation. Can be used to tweak the same generation with different prompts. If not provided, a latents\n tensor will ge generated by sampling using the supplied random `generator`.\n prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not\n provided, text embeddings will be generated from `prompt` input argument.\n negative_prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt\n weighting. 
If not provided, negative_prompt_embeds will be generated from `negative_prompt` input\n argument.\n pooled_prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.\n If not provided, pooled text embeddings will be generated from `prompt` input argument.\n negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt\n weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`\n input argument.\n output_type (`str`, *optional*, defaults to `\"pil\"`):\n The output format of the generate image. Choose between\n [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead\n of a plain tuple.\n callback (`Callable`, *optional*):\n A function that will be called every `callback_steps` steps during inference. The function will be\n called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.\n callback_steps (`int`, *optional*, defaults to 1):\n The frequency at which the `callback` function will be called. If not specified, the callback will be\n called at every step.\n cross_attention_kwargs (`dict`, *optional*):\n A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under\n `self.processor` in\n [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).\n guidance_rescale (`float`, *optional*, defaults to 0.0):\n Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are\n Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `φ` in equation 16. of\n [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).\n Guidance rescale factor should fix overexposure when using zero terminal SNR.\n original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):\n If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.\n `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as\n explained in section 2.2 of\n [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).\n crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):\n `crops_coords_top_left` can be used to generate an image that appears to be \"cropped\" from the position\n `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting\n `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of\n [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).\n target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):\n For most cases, `target_size` should be set to the desired height and width of the generated image. If\n not specified it will default to `(height, width)`. 
Part of SDXL's micro-conditioning as explained in\n section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).\n negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):\n To negatively condition the generation process based on a specific image resolution. Part of SDXL's\n micro-conditioning as explained in section 2.2 of\n [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more\n information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.\n negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):\n To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's\n micro-conditioning as explained in section 2.2 of\n [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more\n information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.\n negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):\n To negatively condition the generation process based on a target image resolution. It should be as same\n as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of\n [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more\n information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.\n\n Examples:\n\n Returns:\n [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`:\n [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a\n `tuple`. When returning a tuple, the first element is a list with the generated images.\n \"\"\"\n # 0. Default height and width to unet\n height = height or self.default_sample_size * self.vae_scale_factor\n width = width or self.default_sample_size * self.vae_scale_factor\n\n original_size = original_size or (height, width)\n target_size = target_size or (height, width)\n\n # 1. Check inputs. Raise error if not correct\n self.check_inputs(\n prompt,\n prompt_2,\n height,\n width,\n callback_steps,\n negative_prompt,\n negative_prompt_2,\n prompt_embeds,\n negative_prompt_embeds,\n pooled_prompt_embeds,\n negative_pooled_prompt_embeds,\n )\n\n # 2. Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n device = self._execution_device\n\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n\n # 3. 
Encode input prompt\n lora_scale = cross_attention_kwargs.get(\"scale\", None) if cross_attention_kwargs is not None else None\n\n (\n prompt_embeds,\n negative_prompt_embeds,\n pooled_prompt_embeds,\n negative_pooled_prompt_embeds,\n ) = self.encode_prompt(\n prompt=prompt,\n prompt_2=prompt_2,\n device=device,\n num_images_per_prompt=num_images_per_prompt,\n do_classifier_free_guidance=do_classifier_free_guidance,\n negative_prompt=negative_prompt,\n negative_prompt_2=negative_prompt_2,\n prompt_embeds=prompt_embeds,\n negative_prompt_embeds=negative_prompt_embeds,\n pooled_prompt_embeds=pooled_prompt_embeds,\n negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,\n lora_scale=lora_scale,\n clip_skip=clip_skip,\n )\n\n # 4. Prepare timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n\n timesteps = self.scheduler.timesteps\n\n # 5. Prepare latent variables\n num_channels_latents = self.unet.config.in_channels\n latents = self.prepare_latents(\n batch_size * num_images_per_prompt,\n num_channels_latents,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n latents,\n )\n\n # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # 7. Prepare added time ids & embeddings\n add_text_embeds = pooled_prompt_embeds\n add_time_ids = self._get_add_time_ids(\n original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype\n )\n if negative_original_size is not None and negative_target_size is not None:\n negative_add_time_ids = self._get_add_time_ids(\n negative_original_size,\n negative_crops_coords_top_left,\n negative_target_size,\n dtype=prompt_embeds.dtype,\n )\n else:\n negative_add_time_ids = add_time_ids\n\n if do_classifier_free_guidance:\n prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)\n add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)\n add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)\n\n prompt_embeds = prompt_embeds.to(device)\n add_text_embeds = add_text_embeds.to(device)\n add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)\n\n # 8. 
Denoising loop\n num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)\n\n # 8.1 Apply denoising_end\n if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1:\n discrete_timestep_cutoff = int(\n round(\n self.scheduler.config.num_train_timesteps\n - (denoising_end * self.scheduler.config.num_train_timesteps)\n )\n )\n num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))\n timesteps = timesteps[:num_inference_steps]\n\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n # predict the noise residual\n added_cond_kwargs = {\"text_embeds\": add_text_embeds, \"time_ids\": add_time_ids}\n noise_pred = self.unet(\n latent_model_input,\n t,\n encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n added_cond_kwargs=added_cond_kwargs,\n return_dict=False,\n )[0]\n\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n if do_classifier_free_guidance and guidance_rescale > 0.0:\n # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf\n noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]\n\n # call the callback, if provided\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n step_idx = i // getattr(self.scheduler, \"order\", 1)\n callback(step_idx, t, latents)\n\n if XLA_AVAILABLE:\n xm.mark_step()\n\n if not output_type == \"latent\":\n # make sure the VAE is in float32 mode, as it overflows in float16\n needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast\n\n if needs_upcasting:\n self.upcast_vae()\n latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)\n\n image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]\n\n # cast back to fp16 if needed\n if needs_upcasting:\n self.vae.to(dtype=torch.float16)\n else:\n image = latents\n\n if not output_type == \"latent\":\n # apply watermark if available\n if self.watermark is not None:\n image = self.watermark.apply_watermark(image)\n\n image = self.image_processor.postprocess(image, output_type=output_type)\n\n # Offload all models\n self.maybe_free_model_hooks()\n\n if not return_dict:\n return (image,)\n\n return StableDiffusionXLPipelineOutput(images=image)" } ]
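The pipeline snippet in the context above combines the unconditional and text-conditioned noise predictions via classifier-free guidance (enabled when `guidance_scale > 1`). A minimal, self-contained sketch of that single step, with assumed tensor shapes and an assumed guidance scale, is:

```python
# Sketch of the classifier-free guidance step shown in the snippet above.
# The tensor shapes and guidance_scale value are illustrative assumptions.
import torch

guidance_scale = 5.0
noise_pred = torch.randn(2, 4, 128, 128)   # [uncond, text] predictions stacked along the batch dim
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(noise_pred.shape)  # torch.Size([1, 4, 128, 128])
```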
import argparse
import torch
import requests
import os
import copy
from llmga.llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from llmga.llava.conversation import conv_templates, SeparatorStyle
from llmga.llava.model.builder import load_pretrained_model
from llmga.llava.utils import disable_torch_init
from llmga.llava.mm_utils import tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
from PIL import Image
from io import BytesIO
from llmga.diffusers.pipeline_stable_diffusion_xl_lpw import StableDiffusionXLPipeline
14275
def load_image(image_file):
    if image_file.startswith('http') or image_file.startswith('https'):
        response = requests.get(image_file)
        image = Image.open(BytesIO(response.content)).convert('RGB')
    else:
        image = Image.open(image_file).convert('RGB')
    return image


def main(args):
    # Model
    disable_torch_init()

    model_name = get_model_name_from_path(args.model_path)
    tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, args.load_8bit, args.load_4bit)
def load_image(image_file):
    if image_file.startswith('http') or image_file.startswith('https'):
        response = requests.get(image_file)
        image = Image.open(BytesIO(response.content)).convert('RGB')
    else:
        image = Image.open(image_file).convert('RGB')
    return image


def main(args):
    # Model
    disable_torch_init()

    model_name = get_model_name_from_path(args.model_path)
    tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, args.load_8bit, args.load_4bit)
pipe = StableDiffusionXLPipeline.from_pretrained(
10
2023-11-27 18:46:55+00:00
16k
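In the record above, the `next_line` field (`pipe = StableDiffusionXLPipeline.from_pretrained(`) continues the truncated `cropped_code` inside `main`. The sketch below shows one plausible way such a call could be completed; the checkpoint path, dtype, and argument names are hypothetical assumptions for illustration, not values recovered from the truncated record.

```python
# Hypothetical continuation for illustration only: the real arguments of
# from_pretrained(...) are truncated out of this record.
import torch
from llmga.diffusers.pipeline_stable_diffusion_xl_lpw import StableDiffusionXLPipeline


def build_pipeline(sd_model_path: str, device: str = "cuda"):
    # Load the long-prompt-weighting SDXL pipeline named in the record's imports.
    pipe = StableDiffusionXLPipeline.from_pretrained(
        sd_model_path,              # assumed to come from a CLI argument such as args.sd_model_path
        torch_dtype=torch.float16,  # assumed precision choice
    )
    return pipe.to(device)
```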
sherwinbahmani/4dfy
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )" }, { "identifier": "BaseGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "contract_to_unisphere", "path": "threestudio/models/geometry/base.py", "snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x" }, { "identifier": "ImplicitSDF", "path": "threestudio/models/geometry/implicit_sdf.py", "snippet": "class ImplicitSDF(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference']\n finite_difference_normal_eps: float = 0.01\n shape_init: Optional[str] = None\n shape_init_params: Optional[Any] = None\n shape_init_mesh_up: str = \"+z\"\n shape_init_mesh_front: str = \"+x\"\n force_shape_init: bool = False\n sdf_bias: Union[float, str] = 0.0\n sdf_bias_params: Optional[Any] = None\n\n # no need to removal outlier for SDF\n isosurface_remove_outliers: bool = False\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.sdf_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n if self.cfg.isosurface_deformable_grid:\n assert (\n self.cfg.isosurface_method == \"mt\"\n ), \"isosurface_deformable_grid only works with mt\"\n self.deformation_network = get_mlp(\n self.encoding.n_output_dims, 3, 
self.cfg.mlp_network_config\n )\n\n def initialize_shape(self) -> None:\n if self.cfg.shape_init is None and not self.cfg.force_shape_init:\n return\n\n # do not initialize shape if weights are provided\n if self.cfg.weights is not None and not self.cfg.force_shape_init:\n return\n\n if self.cfg.sdf_bias != 0.0:\n threestudio.warn(\n \"shape_init and sdf_bias are both specified, which may lead to unexpected results.\"\n )\n\n get_gt_sdf: Callable[[Float[Tensor, \"N 3\"]], Float[Tensor, \"N 1\"]]\n assert isinstance(self.cfg.shape_init, str)\n if self.cfg.shape_init == \"ellipsoid\":\n assert (\n isinstance(self.cfg.shape_init_params, Sized)\n and len(self.cfg.shape_init_params) == 3\n )\n size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return ((points_rand / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n\n get_gt_sdf = func\n elif self.cfg.shape_init == \"sphere\":\n assert isinstance(self.cfg.shape_init_params, float)\n radius = self.cfg.shape_init_params\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius\n\n get_gt_sdf = func\n elif self.cfg.shape_init.startswith(\"mesh:\"):\n assert isinstance(self.cfg.shape_init_params, float)\n mesh_path = self.cfg.shape_init[5:]\n if not os.path.exists(mesh_path):\n raise ValueError(f\"Mesh file {mesh_path} does not exist.\")\n\n import trimesh\n\n mesh = trimesh.load(mesh_path)\n\n # move to center\n centroid = mesh.vertices.mean(0)\n mesh.vertices = mesh.vertices - centroid\n\n # align to up-z and front-x\n dirs = [\"+x\", \"+y\", \"+z\", \"-x\", \"-y\", \"-z\"]\n dir2vec = {\n \"+x\": np.array([1, 0, 0]),\n \"+y\": np.array([0, 1, 0]),\n \"+z\": np.array([0, 0, 1]),\n \"-x\": np.array([-1, 0, 0]),\n \"-y\": np.array([0, -1, 0]),\n \"-z\": np.array([0, 0, -1]),\n }\n if (\n self.cfg.shape_init_mesh_up not in dirs\n or self.cfg.shape_init_mesh_front not in dirs\n ):\n raise ValueError(\n f\"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}.\"\n )\n if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:\n raise ValueError(\n \"shape_init_mesh_up and shape_init_mesh_front must be orthogonal.\"\n )\n z_, x_ = (\n dir2vec[self.cfg.shape_init_mesh_up],\n dir2vec[self.cfg.shape_init_mesh_front],\n )\n y_ = np.cross(z_, x_)\n std2mesh = np.stack([x_, y_, z_], axis=0).T\n mesh2std = np.linalg.inv(std2mesh)\n\n # scaling\n scale = np.abs(mesh.vertices).max()\n mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params\n mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T\n\n from pysdf import SDF\n\n sdf = SDF(mesh.vertices, mesh.faces)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n # add a negative signed here\n # as in pysdf the inside of the shape has positive signed distance\n return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(\n points_rand\n )[..., None]\n\n get_gt_sdf = func\n\n else:\n raise ValueError(\n f\"Unknown shape initialization type: {self.cfg.shape_init}\"\n )\n\n # Initialize SDF to a given shape when no weights are provided or force_shape_init is True\n optim = torch.optim.Adam(self.parameters(), lr=1e-3)\n from tqdm import tqdm\n\n for _ in tqdm(\n range(1000),\n desc=f\"Initializing SDF to a(n) {self.cfg.shape_init}:\",\n disable=get_rank() != 0,\n ):\n points_rand = (\n torch.rand((10000, 3), 
dtype=torch.float32).to(self.device) * 2.0 - 1.0\n )\n sdf_gt = get_gt_sdf(points_rand)\n sdf_pred = self.forward_sdf(points_rand)\n loss = F.mse_loss(sdf_pred, sdf_gt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n def get_shifted_sdf(\n self, points: Float[Tensor, \"*N Di\"], sdf: Float[Tensor, \"*N 1\"]\n ) -> Float[Tensor, \"*N 1\"]:\n sdf_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.sdf_bias == \"ellipsoid\":\n assert (\n isinstance(self.cfg.sdf_bias_params, Sized)\n and len(self.cfg.sdf_bias_params) == 3\n )\n size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)\n sdf_bias = ((points / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n elif self.cfg.sdf_bias == \"sphere\":\n assert isinstance(self.cfg.sdf_bias_params, float)\n radius = self.cfg.sdf_bias_params\n sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius\n elif isinstance(self.cfg.sdf_bias, float):\n sdf_bias = self.cfg.sdf_bias\n else:\n raise ValueError(f\"Unknown sdf bias {self.cfg.sdf_bias}\")\n return sdf + sdf_bias\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n output = {\"sdf\": sdf}\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if self.cfg.normal_type == \"finite_difference\":\n eps = self.cfg.finite_difference_normal_eps\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 
6 1\"] = self.forward_sdf(points_offset)\n normal = (\n 0.5 * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0]) / eps\n )\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n return output\n\n def forward_sdf(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n sdf = self.sdf_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n return sdf\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n if self.cfg.isosurface_deformable_grid:\n deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)\n return sdf, deformation\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out" }, { "identifier": "ImplicitVolume", "path": "threestudio/models/geometry/implicit_volume.py", "snippet": "class ImplicitVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference']\n finite_difference_normal_eps: float = 0.01\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n\n anneal_density_blob_std_config: Optional[dict] = None\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.density_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n if 
self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n self.density_blob_std = self.cfg.density_blob_std\n\n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n return raw_density, density\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n density = self.density_network(enc).view(*points.shape[:-1], 1)\n raw_density, density = self.get_activated_density(points_unscaled, density)\n\n output = {\n \"density\": density,\n }\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if self.cfg.normal_type == \"finite_difference\":\n # TODO: use raw density\n eps = self.cfg.finite_difference_normal_eps\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 
6 1\"] = self.forward_density(\n points_offset\n )\n normal = (\n -0.5\n * (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])\n / eps\n )\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"analytic\":\n normal = -torch.autograd.grad(\n density,\n points_unscaled,\n grad_outputs=torch.ones_like(density),\n create_graph=True,\n )[0]\n normal = F.normalize(normal, dim=-1)\n if not grad_enabled:\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n\n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n density = self.density_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n\n _, density = self.get_activated_density(points_unscaled, density)\n return density\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n if self.cfg.isosurface_deformable_grid:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support isosurface_deformable_grid. Ignoring.\"\n )\n density = self.forward_density(points)\n return density, None\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return -(field - threshold)\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"ImplicitVolume\":\n if isinstance(other, ImplicitVolume):\n instance = ImplicitVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {ImplicitVolume.__name__} from {other.__class__.__name__}\"\n )\n \n def update_step(\n self, epoch: int, global_step: int, on_load_weights: bool = False\n ) -> None:\n if self.cfg.anneal_density_blob_std_config is not None:\n min_step = self.cfg.anneal_density_blob_std_config.min_anneal_step\n max_step = self.cfg.anneal_density_blob_std_config.max_anneal_step\n if (\n global_step >= min_step\n and global_step <= max_step\n ): \n end_val = 
self.cfg.anneal_density_blob_std_config.end_val\n start_val = self.cfg.anneal_density_blob_std_config.start_val\n self.density_blob_std = start_val + (global_step - min_step)*(end_val - start_val)/(max_step - min_step)" }, { "identifier": "MarchingTetrahedraHelper", "path": "threestudio/models/isosurface.py", "snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n [4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, 
idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * -1\n )\n mapping[mask_edges] = torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh" }, { "identifier": "Mesh", "path": "threestudio/models/mesh.py", "snippet": "class Mesh:\n def __init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: 
{}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n vn_idx[i] = self.t_nrm_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = 
pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom / torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n atlas.generate(co, po)\n vmapping, indices, uvs = atlas.get_mesh(0)\n vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with 
torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss" }, { "identifier": "get_encoding", "path": "threestudio/models/networks.py", "snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n elif config.otype == \"HashGridSpatialTime\":\n encoding = TCNNEncodingSpatialTime(n_input_dims, config)\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding" }, { "identifier": "get_mlp", "path": "threestudio/models/networks.py", "snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network" }, { "identifier": "scale_tensor", "path": "threestudio/utils/ops.py", "snippet": "def scale_tensor(\n dat: Num[Tensor, \"... D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat" } ]
from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio
13512
nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: raise NotImplementedError def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance elif isinstance(other, ImplicitVolume): instance = TetrahedraSDFGrid(cfg, **kwargs) if other.cfg.isosurface_method != "mt": other.cfg.isosurface_method = "mt" threestudio.warn( f"Override isosurface_method of the source geometry to 'mt'" ) if other.cfg.isosurface_resolution != instance.cfg.isosurface_resolution: other.cfg.isosurface_resolution = instance.cfg.isosurface_resolution threestudio.warn( f"Override isosurface_resolution of the source geometry to {instance.cfg.isosurface_resolution}" ) mesh = other.isosurface() instance.isosurface_bbox = mesh.extras["bbox"] instance.sdf.data = ( mesh.extras["grid_level"].to(instance.sdf.data).clamp(-1, 1) ) if not instance.cfg.geometry_only and copy_net: instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: raise NotImplementedError def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if 
isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance elif isinstance(other, ImplicitVolume): instance = TetrahedraSDFGrid(cfg, **kwargs) if other.cfg.isosurface_method != "mt": other.cfg.isosurface_method = "mt" threestudio.warn( f"Override isosurface_method of the source geometry to 'mt'" ) if other.cfg.isosurface_resolution != instance.cfg.isosurface_resolution: other.cfg.isosurface_resolution = instance.cfg.isosurface_resolution threestudio.warn( f"Override isosurface_resolution of the source geometry to {instance.cfg.isosurface_resolution}" ) mesh = other.isosurface() instance.isosurface_bbox = mesh.extras["bbox"] instance.sdf.data = ( mesh.extras["grid_level"].to(instance.sdf.data).clamp(-1, 1) ) if not instance.cfg.geometry_only and copy_net: instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
elif isinstance(other, ImplicitSDF):
3
2023-11-29 05:15:56+00:00
16k
rlawjdghek/StableVITON
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates\n else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def reset_num_updates(self):\n del self.num_updates\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 
1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ema_decay=None,\n learn_logvar=False\n ):\n super().__init__()\n self.lossconfig = lossconfig\n self.learn_logvar = learn_logvar\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = torch.nn.Identity()\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n self.use_ema = ema_decay is not None\n if self.use_ema:\n self.ema_decay = ema_decay\n assert 0. 
< ema_decay < 1.\n self.model_ema = LitEma(self, decay=ema_decay)\n print(f\"Keeping EMAs of {len(list(self.model_ema.buffers()))}.\")\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n def init_loss(self):\n self.loss = instantiate_from_config(self.lossconfig)\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n @contextmanager\n def ema_scope(self, context=None):\n if self.use_ema:\n self.model_ema.store(self.parameters())\n self.model_ema.copy_to(self)\n if context is not None:\n print(f\"{context}: Switched to EMA weights\")\n try:\n yield None\n finally:\n if self.use_ema:\n self.model_ema.restore(self.parameters())\n if context is not None:\n print(f\"{context}: Restored training weights\")\n\n def on_train_batch_end(self, *args, **kwargs):\n if self.use_ema:\n self.model_ema(self)\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx):\n real_img = self.get_input(batch, self.image_key)\n recon, posterior = self(real_img)\n loss = self.loss(real_img, recon, posterior)\n return loss\n \n def validation_step(self, batch, batch_idx):\n log_dict = self._validation_step(batch, batch_idx)\n with self.ema_scope():\n log_dict_ema = self._validation_step(batch, batch_idx, postfix=\"_ema\")\n return log_dict\n\n def _validation_step(self, batch, batch_idx, postfix=\"\"):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n self.log(f\"val{postfix}/rec_loss\", log_dict_ae[f\"val{postfix}/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n def configure_optimizers(self):\n lr = self.learning_rate\n ae_params_list = list(self.decoder.parameters())\n if self.learn_logvar:\n print(f\"{self.__class__.__name__}: Learning logvar\")\n ae_params_list.append(self.loss.logvar)\n opt_ae = torch.optim.Adam(ae_params_list,\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec 
= self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n if log_ema or self.use_ema:\n with self.ema_scope():\n xrec_ema, posterior_ema = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec_ema.shape[1] > 3\n xrec_ema = self.to_rgb(xrec_ema)\n log[\"samples_ema\"] = self.decode(torch.randn_like(posterior_ema.sample()))\n log[\"reconstructions_ema\"] = xrec_ema\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n print(f\"beta scheduler name : {schedule}\")\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "zero_module", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module" }, { "identifier": "conv_nd", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, 
attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates, cond_output_dict = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule\n )\n return samples, intermediates, cond_output_dict\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0, cond_output_dict = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n if cond_output_dict is not None:\n cond_output = cond_output_dict[\"cond_output\"] \n if self.model.use_noisy_cond:\n b = cond_output.shape[0]\n\n alphas = self.model.alphas_cumprod if ddim_use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if ddim_use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if ddim_use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if ddim_use_original_steps else self.ddim_sigmas\n\n device = cond_output.device\n a_t = torch.full((b, 1, 1, 1), alphas[0], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[0], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[0], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[0], device=device)\n\n c = cond_output_dict[\"cond_input\"]\n e_t = cond_output\n pred_c0 = (c - sqrt_one_minus_at * e_t) / a_t.sqrt()\n dir_ct = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(c.shape, device, False) * temperature\n\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n cond_output = a_prev.sqrt() * pred_c0 + dir_ct + noise \n cond_output_dict[f\"cond_sample\"] = cond_output\n return img, intermediates, cond_output_dict\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output, cond_output_dict = self.model.apply_model(x, t, c)\n else:\n # x_in = torch.cat([x] * 2)\n # t_in = torch.cat([t] * 2)\n # if isinstance(c, dict):\n # assert isinstance(unconditional_conditioning, dict)\n # c_in = dict()\n # for k in c:\n # if isinstance(c[k], list):\n # c_in[k] = [torch.cat([\n # unconditional_conditioning[k][i],\n # c[k][i]]) for i in range(len(c[k]))]\n # else:\n # c_in[k] = torch.cat([\n # unconditional_conditioning[k],\n # c[k]])\n # elif isinstance(c, list):\n # c_in = list()\n # assert isinstance(unconditional_conditioning, list)\n # for i in range(len(c)):\n # c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n # else:\n # c_in = torch.cat([unconditional_conditioning, c])\n x_in = x\n t_in = t\n model_t, cond_output_dict_cond = self.model.apply_model(x_in, t_in, c)\n model_uncond, cond_output_dict_uncond = self.model.apply_model(x_in, t_in, unconditional_conditioning)\n if isinstance(model_t, tuple):\n model_t, _ = model_t\n if isinstance(model_uncond, tuple):\n model_uncond, _ = model_uncond\n if cond_output_dict_cond is not None:\n cond_output_dict = dict()\n for k in cond_output_dict_cond.keys():\n cond_output_dict[k] = torch.cat([cond_output_dict_uncond[k], cond_output_dict_cond[k]])\n else:\n cond_output_dict = None\n # model_output, cond_output_dict = self.model.apply_model(x_in, t_in, c_in)\n # model_uncond, model_t = model_output.chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = 
self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n\n return x_prev, pred_x0, cond_output_dict\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)[0]\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)[0]\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} 
timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec" } ]
import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools import torchvision.transforms as T import random import torch.nn.functional as F from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager, nullcontext from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from omegaconf import ListConfig from torchvision.transforms.functional import resize from diffusers.models.autoencoder_kl import AutoencoderKLOutput from diffusers.models.vae import DecoderOutput from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like, zero_module, conv_nd from ldm.models.diffusion.ddim import DDIMSampler
12955
if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning(self, batch_size, null_label=None): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: if self.cond_stage_key in ["class_label", "cls"]: xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device) return self.get_learned_conditioning(xc) else: raise NotImplementedError("todo") if isinstance(c, list): # in case the encoder gives us a list for i in range(len(c)): c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device) else: c = repeat(c, '1 ... 
-> b ...', b=batch_size).to(self.device) return c @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, **kwargs): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25) log["conditioning"] = xc elif self.cond_stage_key in ['class_label', "cls"]: try: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25) log['conditioning'] = xc except KeyError: # probably no "human_label" in batch pass elif isimage(xc): log["conditioning"] = xc
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, l_cond_simple_weight=1.0, l_cond_recon_weight=1.0, **kwargs ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.unet_config = unet_config self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.imagenet_norm = T.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)) self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight self.l_cond_simple_weight = l_cond_simple_weight self.l_cond_recon_weight = l_cond_recon_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) else: self.register_buffer('logvar', logvar) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_v(self, x, noise, t): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x ) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}_loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}_loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}_loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: 
x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): self.batch = batch for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, force_null_conditioning=False, *args, **kwargs): self.kwargs = kwargs self.force_null_conditioning = force_null_conditioning self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std self.cond_stage_trainable = cond_stage_trainable assert self.num_timesteps_cond <= 
kwargs['timesteps'] if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning: conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) reset_ema = kwargs.pop("reset_ema", False) reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None if self.kwargs["use_imageCLIP"]: self.proj_out = nn.Linear(1024, 768) else: self.proj_out = None if self.use_pbe_weight: print("learnable vector gene") self.learnable_vector = nn.Parameter(torch.randn((1,1,768)), requires_grad=True) else: self.learnable_vector = None if self.kwargs["use_lastzc"]: # deprecated self.lastzc = zero_module(conv_nd(2, 4, 4, 1, 1, 0)) else: self.lastzc = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True if reset_ema: assert self.use_ema print( f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None else: model = instantiate_from_config(config) self.cond_stage_model = model else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior elif isinstance(encoder_posterior, AutoencoderKLOutput): z = encoder_posterior.latent_dist.sample() else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = 
torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, return_x=False, no_latent=False, is_controlnet=False): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) if no_latent: _,_,h,w = x.shape x = resize(x, (h//8, w//8)) return [x, None] encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if is_controlnet and self.lastzc is not None: z = self.lastzc(z) if self.model.conditioning_key is not None and not self.force_null_conditioning: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox', "txt"]: xc = batch[cond_key] elif cond_key in ['class_label', 'cls']: xc = batch else: xc 
= super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if self.kwargs["use_imageCLIP"]: xc = resize(xc, (224,224)) xc = self.imagenet_norm((xc+1)/2) c = xc else: if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) c = c.float() else: if self.kwargs["use_imageCLIP"]: xc = resize(xc, (224,224)) xc = self.imagenet_norm((xc+1)/2) c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_x: out.extend([x]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z output = self.first_stage_model.decode(z) if not isinstance(output, DecoderOutput): return output else: return output.sample def decode_first_stage_train(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): if not self.use_pbe_weight: t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) # pbe negative condition else: t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() self.u_cond_prop=random.uniform(0, 1) c["c_crossattn"] = [self.get_learned_conditioning(c["c_crossattn"])] if self.u_cond_prop < self.u_cond_percent: c["c_crossattn"] = [self.learnable_vector.repeat(x.shape[0],1,1)] return self.p_losses(x, c, t, *args, **kwargs) def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is expected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, 
x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): loss_dict = {} noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output, cond_output_dict = self.apply_model(x_noisy, t, cond) prefix = 'train' if self.training else 'val' if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError() model_loss = None if isinstance(model_output, tuple): model_output, model_loss = model_output if self.only_agn_simple_loss: _, _, l_h, l_w = model_output.shape m_agn = F.interpolate(super().get_input(self.batch, "agn_mask"), (l_h, l_w)) loss_simple = self.get_loss(model_output * (1-m_agn), target * (1-m_agn), mean=False).mean([1, 2, 3]) else: loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f'simple': loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() if self.original_elbo_weight != 0: loss_dict.update({f'loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) if model_loss is not None: loss += model_loss loss_dict.update({f"model loss" : model_loss}) loss_dict.update({f'{prefix}_loss': loss}) return loss, loss_dict def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None): t_in = t model_out, cond_output_dict = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if isinstance(model_out, tuple): model_out, _ = model_out if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1., 1.) 
if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) if return_x0: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning(self, batch_size, null_label=None): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: if self.cond_stage_key in ["class_label", "cls"]: xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device) return self.get_learned_conditioning(xc) else: raise NotImplementedError("todo") if isinstance(c, list): # in case the encoder gives us a list for i in range(len(c)): c[i] = repeat(c[i], '1 ... 
-> b ...', b=batch_size).to(self.device) else: c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device) return c @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, **kwargs): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25) log["conditioning"] = xc elif self.cond_stage_key in ['class_label', "cls"]: try: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25) log['conditioning'] = xc except KeyError: # probably no "human_label" in batch pass elif isimage(xc): log["conditioning"] = xc
if ismap(xc):
3
2023-12-02 05:56:58+00:00
16k
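The `q_sample` and `get_v` methods in the record above implement the two closed forms at the heart of this diffusion code: x_t = sqrt(a_bar_t) * x_0 + sqrt(1 - a_bar_t) * eps for the forward noising step, and v = sqrt(a_bar_t) * eps - sqrt(1 - a_bar_t) * x_0 for the "v" prediction target. The short sketch below is illustrative only: it reproduces just those two formulas outside the class so they can be sanity-checked in isolation. The helper names are invented here, and the sqrt-space linear beta schedule is an assumption about what `make_beta_schedule("linear", ...)` computes, since that function is not included in this record.

import torch

def linear_alphas_cumprod(timesteps=1000, linear_start=1e-4, linear_end=2e-2):
    # Assumed "linear" schedule: betas spaced linearly in sqrt-space, then squared.
    betas = torch.linspace(linear_start ** 0.5, linear_end ** 0.5, timesteps) ** 2
    return torch.cumprod(1.0 - betas, dim=0)  # cumulative product a_bar_t

def q_sample(x0, t, alphas_cumprod, noise):
    # Same closed form as DDPM.q_sample: x_t = sqrt(a_bar_t)*x_0 + sqrt(1 - a_bar_t)*eps
    a_bar = alphas_cumprod[t].view(-1, 1, 1, 1)
    return a_bar.sqrt() * x0 + (1.0 - a_bar).sqrt() * noise

def v_target(x0, t, alphas_cumprod, noise):
    # Same closed form as DDPM.get_v: v = sqrt(a_bar_t)*eps - sqrt(1 - a_bar_t)*x_0
    a_bar = alphas_cumprod[t].view(-1, 1, 1, 1)
    return a_bar.sqrt() * noise - (1.0 - a_bar).sqrt() * x0

if __name__ == "__main__":
    a_bar = linear_alphas_cumprod()
    x0 = torch.randn(2, 3, 8, 8)              # toy batch standing in for latents/images
    t = torch.randint(0, 1000, (2,))          # one timestep index per sample
    eps = torch.randn_like(x0)
    print(q_sample(x0, t, a_bar, eps).shape)  # torch.Size([2, 3, 8, 8])
    print(v_target(x0, t, a_bar, eps).shape)  # torch.Size([2, 3, 8, 8])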
AIFSH/NativeSpeaker
src/core.py
[ { "identifier": "HandleLog", "path": "src/log_helper.py", "snippet": "class HandleLog:\n \"\"\"\n 先创建日志记录器(logging.getLogger),然后再设置日志级别(logger.setLevel),\n 接着再创建日志文件,也就是日志保存的地方(logging.FileHandler),然后再设置日志格式(logging.Formatter),\n 最后再将日志处理程序记录到记录器(addHandler)\n \"\"\"\n\n def __init__(self):\n self.__now_time = datetime.now().strftime('%Y-%m-%d') # 当前日期格式化\n self.__all_log_path = os.path.join(log_path, self.__now_time + \"-all\" + \".log\") # 收集所有日志信息文件\n self.__error_log_path = os.path.join(log_path, self.__now_time + \"-error\" + \".log\") # 收集错误日志信息文件\n self.__logger = logging.getLogger() # 创建日志记录器\n self.__logger.setLevel(logging.DEBUG) # 设置默认日志记录器记录级别\n\n @staticmethod\n def __init_logger_handler(log_path):\n \"\"\"\n 创建日志记录器handler,用于收集日志\n :param log_path: 日志文件路径\n :return: 日志记录器\n \"\"\"\n # 写入文件,如果文件超过1M大小时,切割日志文件,仅保留3个文件\n logger_handler = RotatingFileHandler(filename=log_path, maxBytes=1 * 1024 * 1024, backupCount=3, encoding='utf-8')\n return logger_handler\n\n @staticmethod\n def __init_console_handle():\n \"\"\"创建终端日志记录器handler,用于输出到控制台\"\"\"\n console_handle = colorlog.StreamHandler()\n return console_handle\n\n def __set_log_handler(self, logger_handler, level=logging.DEBUG):\n \"\"\"\n 设置handler级别并添加到logger收集器\n :param logger_handler: 日志记录器\n :param level: 日志记录器级别\n \"\"\"\n logger_handler.setLevel(level=level)\n self.__logger.addHandler(logger_handler)\n\n def __set_color_handle(self, console_handle):\n \"\"\"\n 设置handler级别并添加到终端logger收集器\n :param console_handle: 终端日志记录器\n :param level: 日志记录器级别\n \"\"\"\n console_handle.setLevel(logging.DEBUG)\n self.__logger.addHandler(console_handle)\n\n @staticmethod\n def __set_color_formatter(console_handle, color_config):\n \"\"\"\n 设置输出格式-控制台\n :param console_handle: 终端日志记录器\n :param color_config: 控制台打印颜色配置信息\n :return:\n \"\"\"\n formatter = colorlog.ColoredFormatter(default_formats[\"color_format\"], log_colors=color_config)\n console_handle.setFormatter(formatter)\n\n @staticmethod\n def __set_log_formatter(file_handler):\n \"\"\"\n 设置日志输出格式-日志文件\n :param file_handler: 日志记录器\n \"\"\"\n formatter = logging.Formatter(default_formats[\"log_format\"], datefmt='%a, %d %b %Y %H:%M:%S')\n file_handler.setFormatter(formatter)\n\n @staticmethod\n def __close_handler(file_handler):\n \"\"\"\n 关闭handler\n :param file_handler: 日志记录器\n \"\"\"\n file_handler.close()\n\n def __console(self, level, message):\n \"\"\"构造日志收集器\"\"\"\n all_logger_handler = self.__init_logger_handler(self.__all_log_path) # 创建日志文件\n error_logger_handler = self.__init_logger_handler(self.__error_log_path)\n console_handle = self.__init_console_handle()\n\n self.__set_log_formatter(all_logger_handler) # 设置日志格式\n self.__set_log_formatter(error_logger_handler)\n self.__set_color_formatter(console_handle, log_colors_config)\n\n self.__set_log_handler(all_logger_handler) # 设置handler级别并添加到logger收集器\n self.__set_log_handler(error_logger_handler, level=logging.ERROR)\n self.__set_color_handle(console_handle)\n\n if level == 'info':\n self.__logger.info(message)\n elif level == 'debug':\n self.__logger.debug(message)\n elif level == 'warning':\n self.__logger.warning(message)\n elif level == 'error':\n self.__logger.error(message)\n elif level == 'critical':\n self.__logger.critical(message)\n\n self.__logger.removeHandler(all_logger_handler) # 避免日志输出重复问题\n self.__logger.removeHandler(error_logger_handler)\n self.__logger.removeHandler(console_handle)\n\n self.__close_handler(all_logger_handler) # 关闭handler\n self.__close_handler(error_logger_handler)\n\n def debug(self, 
message):\n self.__console('debug', message)\n\n def info(self, message):\n self.__console('info', message)\n\n def warning(self, message):\n self.__console('warning', message)\n\n def error(self, message):\n self.__console('error', message)\n\n def critical(self, message):\n self.__console('critical', message)" }, { "identifier": "AudioProcess", "path": "src/audio_bgm_split.py", "snippet": "class AudioProcess:\n def __init__(self, agg, is_half=False, tta=False):\n\n # model_path = os.path.join('weights', 'HP5-主旋律人声vocals+其他instrumentals.pth')\n model_path = load_file_from_url(url=\"https://hf-mirror.com/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5-%E4%B8%BB%E6%97%8B%E5%BE%8B%E4%BA%BA%E5%A3%B0vocals%2B%E5%85%B6%E4%BB%96instrumentals.pth?download=true\", \n model_dir='weights', progress=True, file_name=\"HP5-主旋律人声vocals+其他instrumentals.pth\")\n self.device = 'cuda' if torch.cuda.is_available() else 'cpu'\n self.data = {\n # Processing Options\n \"postprocess\": False,\n \"tta\": tta,\n # Constants\n \"window_size\": 512,\n \"agg\": agg,\n \"high_end_process\": \"mirroring\",\n }\n mp = ModelParameters(\"src/third_part/uvr5_pack/lib_v5/modelparams/4band_v2.json\")\n model = Nets.CascadedASPPNet(mp.param[\"bins\"] * 2)\n cpk = torch.load(model_path, map_location=\"cpu\")\n model.load_state_dict(cpk)\n model.eval()\n if is_half:\n model = model.half().to(self.device)\n else:\n model = model.to(self.device)\n\n self.mp = mp\n self.model = model\n\n def split(self, music_file):\n \n X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {}\n bands_n = len(self.mp.param[\"band\"])\n # print(bands_n)\n for d in range(bands_n, 0, -1):\n bp = self.mp.param[\"band\"][d]\n if d == bands_n: # high-end band\n (\n X_wave[d],\n _,\n ) = librosa.core.load( # 理论上librosa读取可能对某些音频有bug,应该上ffmpeg读取,但是太麻烦了弃坑\n path=music_file,\n sr=bp[\"sr\"],\n mono=False,\n dtype=np.float32,\n res_type=bp[\"res_type\"],\n )\n if X_wave[d].ndim == 1:\n X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]])\n else: # lower bands\n X_wave[d] = librosa.core.resample(\n y=X_wave[d + 1],\n orig_sr=self.mp.param[\"band\"][d + 1][\"sr\"],\n target_sr=bp[\"sr\"],\n res_type=bp[\"res_type\"],\n )\n # Stft of wave source\n X_spec_s[d] = spec_utils.wave_to_spectrogram_mt(\n X_wave[d],\n bp[\"hl\"],\n bp[\"n_fft\"],\n self.mp.param[\"mid_side\"],\n self.mp.param[\"mid_side_b2\"],\n self.mp.param[\"reverse\"],\n )\n # pdb.set_trace()\n if d == bands_n and self.data[\"high_end_process\"] != \"none\":\n input_high_end_h = (bp[\"n_fft\"] // 2 - bp[\"crop_stop\"]) + (\n self.mp.param[\"pre_filter_stop\"] - self.mp.param[\"pre_filter_start\"]\n )\n input_high_end = X_spec_s[d][\n :, bp[\"n_fft\"] // 2 - input_high_end_h : bp[\"n_fft\"] // 2, :\n ]\n\n X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp)\n aggresive_set = float(self.data[\"agg\"] / 100)\n aggressiveness = {\n \"value\": aggresive_set,\n \"split_bin\": self.mp.param[\"band\"][1][\"crop_stop\"],\n }\n with torch.no_grad():\n pred, X_mag, X_phase = inference(\n X_spec_m, self.device, self.model, aggressiveness, self.data\n )\n # Postprocess\n if self.data[\"postprocess\"]:\n pred_inv = np.clip(X_mag - pred, 0, np.inf)\n pred = spec_utils.mask_silence(pred, pred_inv)\n y_spec_m = pred * X_phase\n v_spec_m = X_spec_m - y_spec_m\n\n \n if self.data[\"high_end_process\"].startswith(\"mirroring\"):\n input_high_end_y = spec_utils.mirroring(\n self.data[\"high_end_process\"], y_spec_m, input_high_end, self.mp\n )\n wav_instrument = spec_utils.cmb_spectrogram_to_wave(\n 
y_spec_m, self.mp, input_high_end_h, input_high_end_y\n )\n \n input_high_end_v = spec_utils.mirroring(\n self.data[\"high_end_process\"], v_spec_m, input_high_end, self.mp\n )\n wav_vocals = spec_utils.cmb_spectrogram_to_wave(\n v_spec_m, self.mp, input_high_end_h, input_high_end_v\n )\n \n else:\n wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp)\n wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp)\n \n logger.info(\"vocal and instruments split done\")\n \n temp_manager = TempFileManager()\n voice_temp_file = temp_manager.create_temp_file(suffix='.wav')\n noise_temp_file = temp_manager.create_temp_file(suffix='.wav')\n \n sf.write(\n voice_temp_file,\n (np.array(wav_vocals) * 32768).astype(\"int16\"),\n self.mp.param[\"sr\"],\n )\n sf.write(\n noise_temp_file,\n (np.array(wav_instrument) * 32768).astype(\"int16\"),\n self.mp.param[\"sr\"],\n )\n return voice_temp_file.name, noise_temp_file.name" }, { "identifier": "VoiceCloner", "path": "src/voice_clone.py", "snippet": "class VoiceCloner:\n\n def __init__(self, version_name=\"v2.0.3\") -> None:\n self.temp_manager = TempFileManager()\n root_path = os.path.join('weights',f\"xtts_{version_name}\")\n config_path = load_file_from_url(url=f\"https://hf-mirror.com/coqui/XTTS-v2/resolve/{version_name}/config.json?download=true\",\n model_dir=root_path,\n file_name=\"config.json\")\n load_file_from_url(url=f\"https://hf-mirror.com/coqui/XTTS-v2/resolve/{version_name}/model.pth?download=true\",\n model_dir=root_path,\n file_name=\"model.pth\")\n load_file_from_url(url=f\"https://hf-mirror.com/coqui/XTTS-v2/resolve/{version_name}/vocab.json?download=true\",\n model_dir=root_path,\n file_name=\"vocab.json\")\n load_file_from_url(url=f\"https://hf-mirror.com/coqui/XTTS-v2/resolve/{version_name}/hash.md5?download=true\",\n model_dir=root_path,\n file_name=\"hash.md5\")\n # model_path = f\"{root_path}/model.pth\"\n # logger.info(f'model_path:{model_path}')\n self.tts = TTS(model_path=root_path,config_path=config_path,gpu=True)\n \n def __call__(self, text, lang_code, speaker_wav,speed=1.0,*args: Any, **kwds: Any) -> Any:\n temp_file = self.temp_manager.create_temp_file(suffix='.wav').name\n self.tts.tts_to_file(text=text,\n language=lang_code,\n speaker_wav=speaker_wav,\n speed=speed,\n file_path=temp_file)\n return temp_file" }, { "identifier": "TempFileManager", "path": "src/temp_manager.py", "snippet": "class TempFileManager:\n _instance = None\n temp_files = []\n\n def __new__(cls):\n if cls._instance is None:\n cls._instance = super().__new__(cls)\n atexit.register(cls.cleanup)\n return cls._instance\n\n def create_temp_file(self, suffix):\n temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)\n self.temp_files.append(temp_file.name)\n return temp_file\n\n @classmethod\n def cleanup(cls):\n for temp_file in cls.temp_files:\n try:\n # Remove the temporary file\n os.remove(temp_file)\n except OSError:\n pass" }, { "identifier": "Translator", "path": "src/translator.py", "snippet": "class Translator:\n\n def __init__(self,work_with_human=False) -> None:\n # _ = ts.preaccelerate_and_speedtest()\n self.work_with_human = work_with_human\n\n def __call__(self,text,from_lang,to_lang,*args: Any, **kwds: Any) -> Any:\n assert from_lang != to_lang,\"same lang code error,translator only work in language to another language\"\n if self.work_with_human:\n lience = input(\"!!!注意,出现这个提示是因为您自行修改了相关代码,请不要做偏离原文内容的手工翻译,否则后果自负,与该项目开源作者无关!我已经阅读并同意该声明。\\n(!!!Attention!This prompt appears because you modified the code 
yourself,Please do not deviate from the original content of manual translation, or bear the consequences,It has nothing to do with the author of this project! I have read and agree with the statement)\\t yes | no:\\n\").strip()\n if \"y\" not in lience:\n self.work_with_human = False\n \n if \"zh\" in to_lang:\n to_lang = \"zh\"\n logger.info(f\"{from_lang} {to_lang} {text} \")\n try:\n dst_text = ts.translate_text(query_text=text,translator=\"qqTranSmart\",\n from_language=from_lang,to_language=to_lang)\n except ts.server.TranslatorError:\n dst_text = input(\"translator failed,input by self:\")\n dst_text = dst_text.strip()\n return dst_text\n logger.info(\"dst_text:{}\".format(dst_text))\n if self.work_with_human:\n if_by_hand = input(\"translate by hand? 1 by hand, 0 pass:\\t\")\n if if_by_hand == \"1\":\n dst_text = input(\"input by hand:\\n\").strip()\n logger.info(f\"dst_text edited:{dst_text}\")\n\n return dst_text" }, { "identifier": "LipSync", "path": "src/lipsync.py", "snippet": "class LipSync:\n def __init__(self,model_name) -> None:\n self.model_name = model_name\n self.img_size = 96\n self.static = False\n self.device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n self.face_det_batch_size = 16\n self.wav2lip_batch_size = 16\n self.mel_step_size = 16\n self.pads = [0,20,0,0]\n self.nosmooth = True\n self.box = [-1, -1, -1, -1]\n self.fps = 25\n self.resize_factor = 2\n self.rotate = False\n self.crop = [0, -1, 0, -1]\n logger.info('Using {} for inference.'.format(self.device))\n\n load_file_from_url(url=\"https://www.adrianbulat.com/downloads/python-fan/s3fd-619a316812.pth\",\n model_dir=\"src/third_part/wav2lip/face_detection/detection/sfd\",\n file_name=\"s3fd.pth\")\n load_file_from_url(url=\"https://hf-mirror.com/MarjorieSaul/wav2lip_sd_models/resolve/main/wav2lip.pth?download=true\",\n model_dir=\"weights\",\n file_name=\"wav2lip.pth\")\n load_file_from_url(url=\"https://hf-mirror.com/MarjorieSaul/wav2lip_sd_models/resolve/main/wav2lip_gan.pth?download=true\",\n model_dir=\"weights\",\n file_name=\"wav2lip_gan.pth\")\n self.tmp_manager = TempFileManager()\n \n\n def __call__(self, face,audio,outfile,voice,*args: Any, **kwds: Any) -> Any:\n if os.path.isfile(face) and face.split('.')[1] in ['jpg', 'png', 'jpeg']:\n self.static = True\n if not os.path.isfile(face):\n raise ValueError('face argument must be a valid path to video/image file')\n elif face.split('.')[1] in ['jpg', 'png', 'jpeg']:\n full_frames = [cv2.imread(face)]\n fps = self.fps\n else:\n video_stream = cv2.VideoCapture(face)\n fps = video_stream.get(cv2.CAP_PROP_FPS)\n logger.info('Reading video frames...')\n\n full_frames = []\n while 1:\n still_reading, frame = video_stream.read()\n if not still_reading:\n video_stream.release()\n break\n if self.resize_factor > 1:\n frame = cv2.resize(frame, (frame.shape[1]//self.resize_factor, frame.shape[0]//self.resize_factor))\n\n if self.rotate:\n frame = cv2.rotate(frame, cv2.cv2.ROTATE_90_CLOCKWISE)\n \n y1, y2, x1, x2 = self.crop\n if x2 == -1: x2 = frame.shape[1]\n if y2 == -1: y2 = frame.shape[0]\n frame = frame[y1:y2, x1:x2]\n full_frames.append(frame)\n\n logger.info(\"Number of frames available for inference: \"+str(len(full_frames)))\n\n assert audio.endswith('.wav'),\"audio file shoud end with .wav\"\n\n wav = load_wav(audio, sr=16000)\n mel = melspectrogram(wav)\n\n if np.isnan(mel.reshape(-1)).sum() > 0:\n raise ValueError('Mel contains nan! Using a TTS voice? 
Add a small epsilon noise to the wav file and try again')\n \n mel_chunks = []\n mel_idx_multiplier = 80./fps\n i = 0\n while 1:\n start_idx = int(i * mel_idx_multiplier)\n if start_idx + self.mel_step_size > len(mel[0]):\n mel_chunks.append(mel[:, len(mel[0]) - self.mel_step_size:])\n break\n mel_chunks.append(mel[:, start_idx : start_idx + self.mel_step_size])\n i += 1\n \n logger.info(\"Length of mel chunks: {}\".format(len(mel_chunks)))\n\n full_frames = full_frames[:len(mel_chunks)]\n\n batch_size = self.wav2lip_batch_size\n\n gen = self.datagen(full_frames.copy(), mel_chunks)\n while 1:\n try:\n for i, (img_batch, mel_batch, frames, coords) in enumerate(tqdm(gen,\n total=int(np.ceil(float(len(mel_chunks))/batch_size)))):\n if i == 0:\n model = self.load_model()\n logger.info(\"Model loaded\")\n frame_h, frame_w = full_frames[0].shape[:-1]\n temp_file = self.tmp_manager.create_temp_file(suffix='.avi').name\n out = cv2.VideoWriter(temp_file, \n\t\t\t\t\t\t\t\t\tcv2.VideoWriter_fourcc(*'DIVX'), fps, (frame_w, frame_h))\n img_batch = torch.FloatTensor(np.transpose(img_batch, (0, 3, 1, 2))).to(self.device)\n mel_batch = torch.FloatTensor(np.transpose(mel_batch, (0, 3, 1, 2))).to(self.device)\n\n with torch.no_grad():\n pred = model(mel_batch, img_batch)\n pred = pred.cpu().numpy().transpose(0, 2, 3, 1) * 255.\n for p, f, c in zip(pred, frames, coords):\n y1, y2, x1, x2 = c\n try:\n p = cv2.resize(p.astype(np.uint8), (x2 - x1, y2 - y1))\n f[y1:y2, x1:x2] = p\n except cv2.error:\n pass\n out.write(f)\n out.release()\n except RuntimeError:\n if batch_size == 1: \n raise RuntimeError('Image too big to run wav2lip on GPU. Please use the --resize_factor argument')\n batch_size //= 2\n continue\n break\n command = 'ffmpeg -y -i {} -i {} -strict -2 -q:v 1 {}'.format(voice, temp_file, outfile)\n subprocess.call(command, shell=platform.system() != 'Windows')\n \n def load_model(self):\n model = Wav2Lip()\n logger.info(\"Load checkpoint from: {}\".format(self.model_name))\n checkpoint = self._load()\n s = checkpoint[\"state_dict\"]\n new_s = {}\n for k, v in s.items():\n new_s[k.replace('module.', '')] = v\n model.load_state_dict(new_s)\n model = model.to(self.device)\n return model.eval()\n\n def _load(self):\n checkpoint_path = \"weights/{}.pth\".format(self.model_name)\n if self.device == 'cuda':\n checkpoint = torch.load(checkpoint_path)\n else:\n checkpoint = torch.load(checkpoint_path,\n map_location=lambda storage, loc: storage)\n return checkpoint\n \n\n def datagen(self,frames, mels):\n img_batch, mel_batch, frame_batch, coords_batch = [], [], [], []\n\n if self.box[0] == -1:\n if not self.static:\n face_det_results = self.face_detect(frames) # BGR2RGB for CNN face detection\n else:\n face_det_results = self.face_detect([frames[0]])\n else:\n logger.info('Using the specified bounding box instead of face detection...')\n y1, y2, x1, x2 = self.box\n face_det_results = [[f[y1: y2, x1:x2], (y1, y2, x1, x2)] for f in frames]\n \n for i, m in enumerate(mels):\n idx = 0 if self.static else i%len(frames)\n frame_to_save = frames[idx].copy()\n face, coords = face_det_results[idx].copy()\n\n try:\n face = cv2.resize(face, (self.img_size, self.img_size))\n except cv2.error:\n face = np.zeros((10, 10,3), np.uint8)\n face = cv2.resize(face, (self.img_size, self.img_size))\n \n img_batch.append(face)\n mel_batch.append(m)\n frame_batch.append(frame_to_save)\n coords_batch.append(coords)\n\n if len(img_batch) >= self.wav2lip_batch_size:\n img_batch, mel_batch = np.asarray(img_batch), 
np.asarray(mel_batch)\n\n img_masked = img_batch.copy()\n img_masked[:, self.img_size//2:] = 0\n img_batch = np.concatenate((img_masked, img_batch), axis=3) / 255.\n mel_batch = np.reshape(mel_batch, [len(mel_batch), mel_batch.shape[1], mel_batch.shape[2], 1])\n yield img_batch, mel_batch, frame_batch, coords_batch\n img_batch, mel_batch, frame_batch, coords_batch = [], [], [], []\n \n \n if len(img_batch) > 0:\n img_batch, mel_batch = np.asarray(img_batch), np.asarray(mel_batch)\n img_masked = img_batch.copy()\n img_masked[:, self.img_size//2:] = 0\n img_batch = np.concatenate((img_masked, img_batch), axis=3) / 255.\n mel_batch = np.reshape(mel_batch, [len(mel_batch), mel_batch.shape[1], mel_batch.shape[2], 1])\n yield img_batch, mel_batch, frame_batch, coords_batch\n\n\n def face_detect(self,images):\n detector = face_detection.FaceAlignment(face_detection.LandmarksType._2D,\n flip_input=False,device=self.device\n )\n batch_size = self.face_det_batch_size\n while 1:\n predictions = []\n try:\n for i in tqdm(range(0,len(images),batch_size)):\n predictions.extend(detector.get_detections_for_batch(np.array(images[i:i + batch_size])))\n except RuntimeError:\n if batch_size == 1: \n raise RuntimeError('Image too big to run face detection on GPU. Please use the --resize_factor argument')\n batch_size //= 2\n logger.warning('Recovering from OOM error; New batch size: {}'.format(batch_size))\n continue\n break\n results = []\n pady1, pady2, padx1, padx2 = self.pads\n for rect, image in zip(predictions, images):\n if rect is None:\n rect = (0,20,0,0)\n y1 = max(0, rect[1] - pady1)\n y2 = min(image.shape[0], rect[3] + pady2)\n x1 = max(0, rect[0] - padx1)\n x2 = min(image.shape[1], rect[2] + padx2)\n results.append([x1,y1,x2,y2])\n boxes = np.array(results)\n if not self.nosmooth: boxes = self.get_smoothened_boxes(boxes, T=5)\n results = [[image[y1: y2, x1:x2], (y1, y2, x1, x2)] for image, (x1, y1, x2, y2) in zip(images, boxes)]\n import gc; gc.collect(); torch.cuda.empty_cache();del detector\n return results\n\n def get_smoothened_boxes(self,boxes, T):\n for i in range(len(boxes)):\n if i + T > len(boxes):\n window = boxes[len(boxes) - T:]\n else:\n window = boxes[i : i + T]\n boxes[i] = np.mean(window, axis=0)\n return boxes" }, { "identifier": "Upscale", "path": "src/upscale.py", "snippet": "class Upscale:\n def __init__(self,fidelity_weight=0.9) -> None:\n self.pretrain_model_url = {\n 'restoration': 'https://mirror.ghproxy.com/https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth',\n }\n self.device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n self.use_half = True \n self.bg_tile = 400\n self.w = fidelity_weight\n self.bg_upsampler = 'realesrgan'\n self.face_upsample = True\n self.has_aligned = False\n self.detection_model = \"retinaface_resnet50\"\n self.upscale = 2\n self.only_center_face = False\n self.draw_box = False\n self.suffix = None\n\n\n def __call__(self,input_path:str,output_path:str,audio,*args: Any, **kwds: Any) -> Any:\n \n input_video = False\n if input_path.endswith(('jpg', 'jpeg', 'png', 'JPG', 'JPEG', 'PNG')): # input single img path\n input_img_list = [input_path]\n result_root = f'results/test_img_{self.w}'\n elif input_path.endswith(('mp4', 'mov', 'avi', 'MP4', 'MOV', 'AVI')): # input video path\n input_img_list = []\n vidreader = VideoReader(input_path)\n image = vidreader.get_frame()\n while image is not None:\n input_img_list.append(image)\n image = vidreader.get_frame()\n # audio = vidreader.get_audio()\n fps = vidreader.get_fps() \n 
video_name = os.path.basename(input_path)[:-4]\n result_root = f'results/{video_name}_{self.w}'\n input_video = True\n vidreader.close()\n else: # input img folder\n if input_path.endswith('/'): # solve when path ends with /\n input_path = input_path[:-1]\n # scan all the jpg and png images\n input_img_list = sorted(glob.glob(os.path.join(input_path, '*.[jpJP][pnPN]*[gG]')))\n result_root = f'results/{os.path.basename(input_path)}_{self.w}'\n \n if not output_path is None: # set output path\n result_root = output_path\n\n test_img_num = len(input_img_list)\n if test_img_num == 0:\n raise FileNotFoundError('No input image/video is found...\\n' \n '\\tNote that --input_path for video should end with .mp4|.mov|.avi')\n\n # ------------------ set up background upsampler ------------------\n if self.bg_upsampler == 'realesrgan':\n bg_upsampler = self.set_realesrgan()\n else:\n bg_upsampler = None\n \n # ------------------ set up face upsampler ------------------\n if self.face_upsample:\n if bg_upsampler is not None:\n face_upsampler = bg_upsampler\n else:\n face_upsampler = self.set_realesrgan()\n else:\n face_upsampler = None\n \n # ------------------ set up CodeFormer restorer -------------------\n net = CodeFormer(dim_embd=512, codebook_size=1024, n_head=8, n_layers=9, \n connect_list=['32', '64', '128', '256']).to(self.device)\n # ckpt_path = 'weights/CodeFormer/codeformer.pth'\n ckpt_path = load_file_from_url(url=self.pretrain_model_url['restoration'], \n model_dir='weights/CodeFormer', progress=True, file_name=None)\n checkpoint = torch.load(ckpt_path)['params_ema']\n net.load_state_dict(checkpoint)\n net.eval()\n\n # ------------------ set up FaceRestoreHelper -------------------\n # large det_model: 'YOLOv5l', 'retinaface_resnet50'\n # small det_model: 'YOLOv5n', 'retinaface_mobile0.25'\n if not self.has_aligned: \n logger.info(f'Face detection model: {self.detection_model}')\n if bg_upsampler is not None: \n logger.info(f'Background upsampling: True, Face upsampling: {self.face_upsample}')\n else:\n logger.info(f'Background upsampling: False, Face upsampling: {self.face_upsample}')\n\n # -------------------- start to processing ---------------------\n logger.info(\"multi thread processing \")\n '''\n with ThreadPoolExecutor(max_workers=20) as executor:\n for i, img_path in enumerate(input_img_list):\n executor.submit(self.enhance_face,img_path,i,video_name,test_img_num,\n bg_upsampler,result_root,input_video,net,face_upsampler)\n '''\n Parallel(n_jobs=4)(delayed(self.enhance_face)(img_path,i,video_name,test_img_num,\\\n bg_upsampler,result_root,input_video,\\\n net,face_upsampler) for i,img_path in enumerate(input_img_list))\n\n # save enhanced video\n if input_video:\n logger.info('Video Saving...')\n # load images\n video_frames = []\n img_list = sorted(glob.glob(os.path.join(result_root, 'final_results', '*.[jp][pn]g')))\n for img_path in img_list:\n img = cv2.imread(img_path)\n video_frames.append(img)\n # write images to video\n height, width = video_frames[0].shape[:2]\n if self.suffix is not None:\n video_name = f'{video_name}_{self.suffix}.png'\n save_restore_path = os.path.join(result_root, f'{video_name}.avi')\n vidwriter = cv2.VideoWriter(save_restore_path,cv2.VideoWriter_fourcc(*'DIVX'),fps, (width, height))\n \n for f in tqdm(video_frames,desc=\"Combining png to avi...\",total=len(video_frames)):\n vidwriter.write(f)\n \n vidwriter.release()\n \n out_file = os.path.join(result_root, f'{video_name}.mp4')\n command = 'ffmpeg -y -i {} -i {} -strict -2 -q:v 1 
{}'.format(audio, save_restore_path, out_file)\n subprocess.call(command, shell=platform.system() != 'Windows')\n\n logger.info(f'\\nAll results are saved in {result_root}')\n\n def enhance_face(self,img_path,i,video_name,test_img_num,bg_upsampler,result_root,input_video,net,face_upsampler):\n # clean all the intermediate results to process the next image\n face_helper = FaceRestoreHelper(\n self.upscale,\n face_size=512,\n crop_ratio=(1, 1),\n det_model = self.detection_model,\n save_ext='png',\n use_parse=True,\n device=self.device)\n with num_lock:\n if isinstance(img_path, str):\n img_name = os.path.basename(img_path)\n basename, ext = os.path.splitext(img_name)\n logger.info(f'[{i+1}/{test_img_num}] Processing: {img_name}')\n img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n else: # for video processing\n basename = str(i).zfill(6)\n img_name = f'{video_name}_{basename}' if input_video else basename\n logger.info(f'[{i+1}/{test_img_num}] Processing: {img_name}')\n img = img_path\n\n if self.has_aligned: \n # the input faces are already cropped and aligned\n img = cv2.resize(img, (512, 512), interpolation=cv2.INTER_LINEAR)\n # face_helper.is_gray = is_gray(img, threshold=10)\n if face_helper.is_gray:\n logger.info('Grayscale input: True')\n face_helper.cropped_faces = [img]\n else:\n face_helper.read_image(img)\n # get face landmarks for each face\n num_det_faces = face_helper.get_face_landmarks_5(\n only_center_face=self.only_center_face, resize=640, eye_dist_threshold=5)\n logger.info(f'\\tdetect {num_det_faces} faces')\n # align and warp each face\n face_helper.align_warp_face()\n\n # face restoration for each cropped face\n for idx, cropped_face in enumerate(face_helper.cropped_faces):\n # prepare data\n cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=True, float32=True)\n normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)\n cropped_face_t = cropped_face_t.unsqueeze(0).to(self.device)\n\n try:\n with torch.no_grad():\n output = net(cropped_face_t, w=self.w, adain=True)[0]\n restored_face = tensor2img(output, rgb2bgr=True, min_max=(-1, 1))\n del output\n torch.cuda.empty_cache()\n except Exception as error:\n logger.info(f'\\tFailed inference for CodeFormer: {error}')\n restored_face = tensor2img(cropped_face_t, rgb2bgr=True, min_max=(-1, 1))\n\n restored_face = restored_face.astype('uint8')\n face_helper.add_restored_face(restored_face, cropped_face)\n\n # paste_back\n if not self.has_aligned:\n # upsample the background\n if bg_upsampler is not None:\n # Now only support RealESRGAN for upsampling background\n bg_img = bg_upsampler.enhance(img, outscale=self.upscale)[0]\n else:\n bg_img = None\n face_helper.get_inverse_affine(None)\n # paste each restored face to the input image\n if self.face_upsample and face_upsampler is not None: \n restored_img = face_helper.paste_faces_to_input_image(upsample_img=bg_img, draw_box=self.draw_box, face_upsampler=face_upsampler)\n else:\n restored_img = face_helper.paste_faces_to_input_image(upsample_img=bg_img, draw_box=self.draw_box)\n \n \n # save faces\n for idx, (cropped_face, restored_face) in enumerate(zip(face_helper.cropped_faces, face_helper.restored_faces)):\n # save cropped face\n if not self.has_aligned: \n save_crop_path = os.path.join(result_root, 'cropped_faces', f'{basename}_{idx:02d}.png')\n imwrite(cropped_face, save_crop_path)\n # save restored face\n if self.has_aligned:\n save_face_name = f'{basename}.png'\n else:\n save_face_name = f'{basename}_{idx:02d}.png'\n if self.suffix is not None:\n 
save_face_name = f'{save_face_name[:-4]}_{self.suffix}.png'\n save_restore_path = os.path.join(result_root, 'restored_faces', save_face_name)\n imwrite(restored_face, save_restore_path)\n \n # save restored img\n if not self.has_aligned and restored_img is not None:\n if self.suffix is not None:\n basename = f'{basename}_{self.suffix}'\n save_restore_path = os.path.join(result_root, 'final_results', f'{basename}.png')\n imwrite(restored_img, save_restore_path)\n\n\n def set_realesrgan(self):\n if torch.cuda.is_available():\n no_half_gpu_list = ['1650', '1660'] # set False for GPUs that don't support f16\n if not True in [gpu in torch.cuda.get_device_name(0) for gpu in no_half_gpu_list]:\n self.use_half = True\n model = RRDBNet(\n num_in_ch=3,\n num_out_ch=3,\n num_feat=64,\n num_block=23,\n num_grow_ch=32,\n scale=2\n )\n upsampler = RealESRGANer(\n scale=2,\n model_path=\"https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/RealESRGAN_x2plus.pth\",\n model=model,\n tile=self.bg_tile,\n tile_pad=40,\n pre_pad=0,\n half=self.use_half\n )\n if not torch.cuda.is_available():\n logger.warning('Running on CPU now! Make sure your PyTorch version matches your CUDA.')\n return upsampler" }, { "identifier": "analyse_video", "path": "src/nfsw.py", "snippet": "@lru_cache(maxsize = None)\ndef analyse_video(video_path : str) -> bool:\n\tvideo_frame_total = count_video_frame_total(video_path)\n\tfps = detect_fps(video_path)\n\tframe_range = range( 0, video_frame_total)\n\trate = 0.0\n\tcounter = 0\n\twith tqdm(total = len(frame_range), desc = 'video content analysing', unit = 'frame', ascii = ' =') as progress:\n\t\tfor frame_number in frame_range:\n\t\t\tif frame_number % int(fps) == 0:\n\t\t\t\tframe = get_video_frame(video_path, frame_number)\n\t\t\t\tif analyse_frame(frame):\n\t\t\t\t\tcounter += 1\n\t\t\trate = counter * int(fps) / len(frame_range) * 100\n\t\t\tprogress.update()\n\t\t\tprogress.set_postfix(rate = rate)\n\treturn rate > MAX_RATE" }, { "identifier": "load_model", "path": "src/third_part/whisperx/transcribe.py", "snippet": "def cli():" }, { "identifier": "load_audio", "path": "src/third_part/whisperx/audio.py", "snippet": "def load_audio(file: str, sr: int = SAMPLE_RATE):\n \"\"\"\n Open an audio file and read as mono waveform, resampling as necessary\n\n Parameters\n ----------\n file: str\n The audio file to open\n\n sr: int\n The sample rate to resample the audio if necessary\n\n Returns\n -------\n A NumPy array containing the audio waveform, in float32 dtype.\n \"\"\"\n try:\n # Launches a subprocess to decode audio while down-mixing and resampling as necessary.\n # Requires the ffmpeg CLI to be installed.\n cmd = [\n \"ffmpeg\",\n \"-nostdin\",\n \"-threads\",\n \"0\",\n \"-i\",\n file,\n \"-f\",\n \"s16le\",\n \"-ac\",\n \"1\",\n \"-acodec\",\n \"pcm_s16le\",\n \"-ar\",\n str(sr),\n \"-\",\n ]\n out = subprocess.run(cmd, capture_output=True, check=True).stdout\n except subprocess.CalledProcessError as e:\n raise RuntimeError(f\"Failed to load audio: {e.stderr.decode()}\") from e\n\n return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0" }, { "identifier": "DiarizationPipeline", "path": "src/third_part/whisperx/diarize.py", "snippet": "class DiarizationPipeline:\n def __init__(\n self,\n model_name=\"pyannote/speaker-diarization-3.0\",\n use_auth_token=None,\n device: Optional[Union[str, torch.device]] = \"cpu\",\n ):\n if isinstance(device, str):\n device = torch.device(device)\n self.model = Pipeline.from_pretrained(model_name, 
use_auth_token=use_auth_token).to(device)\n\n def __call__(self, audio: Union[str, np.ndarray], min_speakers=None, max_speakers=None):\n if isinstance(audio, str):\n audio = load_audio(audio)\n audio_data = {\n 'waveform': torch.from_numpy(audio[None, :]),\n 'sample_rate': SAMPLE_RATE\n }\n segments = self.model(audio_data, min_speakers=min_speakers, max_speakers=max_speakers)\n diarize_df = pd.DataFrame(segments.itertracks(yield_label=True), columns=['segment', 'label', 'speaker'])\n diarize_df['start'] = diarize_df['segment'].apply(lambda x: x.start)\n diarize_df['end'] = diarize_df['segment'].apply(lambda x: x.end)\n return diarize_df" } ]
import os import torch import soundfile as sf import gc; gc.collect(); torch.cuda.empty_cache(); del cloner import gc; gc.collect(); torch.cuda.empty_cache(); del diarize_model import gc; gc.collect(); torch.cuda.empty_cache(); del whisper from typing import Any from tqdm import tqdm from src.log_helper import HandleLog from moviepy.editor import VideoFileClip,concatenate_videoclips from pathlib import Path from pydub import AudioSegment from src.audio_bgm_split import AudioProcess from src.voice_clone import VoiceCloner from src.temp_manager import TempFileManager from src.translator import Translator from src.lipsync import LipSync from src.upscale import Upscale from src.nfsw import analyse_video from src.third_part.whisperx import load_model,load_audio,DiarizationPipeline
10,879
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com' logger = HandleLog() class Core: def __init__(self, args) -> None: cur_path = os.path.dirname(os.path.realpath(__file__)) # current path self.weights_path = os.path.join(os.path.dirname(cur_path), 'weights') # weights_path to save model if not os.path.exists(self.weights_path): os.mkdir(self.weights_path) # self.input_file = args.input_file_path self.output_file = args.output_file_path self.lang_code = args.lang_code self.device = "cuda" if torch.cuda.is_available() else "cpu" self.hf_token = args.hf_token self.temp_manager = TempFileManager() self.translotor = Translator() self.model_name = args.model_name self.xt_version_name = args.xt_version_name if analyse_video(args.input_file_path): raise("sorry! nativespeaker is not for you") def __call__(self, *args: Any, **kwds: Any) -> Any: logger.critical("[Step 1] Moviepy split voice and frames from video") org_voice_path = os.path.join(Path(self.input_file).parent, "org_voice.wav") org_video_clip = VideoFileClip(self.input_file) org_video_clip.audio.write_audiofile(org_voice_path,codec='pcm_s16le') logger.info("save original voice in {}".format(org_voice_path)) logger.critical("[Step 2] H5 Split vocal and bgm from voice")
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com' logger = HandleLog() class Core: def __init__(self, args) -> None: cur_path = os.path.dirname(os.path.realpath(__file__)) # current path self.weights_path = os.path.join(os.path.dirname(cur_path), 'weights') # weights_path to save model if not os.path.exists(self.weights_path): os.mkdir(self.weights_path) # self.input_file = args.input_file_path self.output_file = args.output_file_path self.lang_code = args.lang_code self.device = "cuda" if torch.cuda.is_available() else "cpu" self.hf_token = args.hf_token self.temp_manager = TempFileManager() self.translotor = Translator() self.model_name = args.model_name self.xt_version_name = args.xt_version_name if analyse_video(args.input_file_path): raise("sorry! nativespeaker is not for you") def __call__(self, *args: Any, **kwds: Any) -> Any: logger.critical("[Step 1] Moviepy split voice and frames from video") org_voice_path = os.path.join(Path(self.input_file).parent, "org_voice.wav") org_video_clip = VideoFileClip(self.input_file) org_video_clip.audio.write_audiofile(org_voice_path,codec='pcm_s16le') logger.info("save original voice in {}".format(org_voice_path)) logger.critical("[Step 2] H5 Split vocal and bgm from voice")
audio_process = AudioProcess(15)
1
2023-12-01 12:23:19+00:00
16k
skhu101/GauHuman
scene/dataset_readers.py
[ { "identifier": "read_extrinsics_text", "path": "scene/colmap_loader.py", "snippet": "def read_extrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n images = {}\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n image_id = int(elems[0])\n qvec = np.array(tuple(map(float, elems[1:5])))\n tvec = np.array(tuple(map(float, elems[5:8])))\n camera_id = int(elems[8])\n image_name = elems[9]\n elems = fid.readline().split()\n xys = np.column_stack([tuple(map(float, elems[0::3])),\n tuple(map(float, elems[1::3]))])\n point3D_ids = np.array(tuple(map(int, elems[2::3])))\n images[image_id] = Image(\n id=image_id, qvec=qvec, tvec=tvec,\n camera_id=camera_id, name=image_name,\n xys=xys, point3D_ids=point3D_ids)\n return images" }, { "identifier": "read_intrinsics_text", "path": "scene/colmap_loader.py", "snippet": "def read_intrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n cameras = {}\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n camera_id = int(elems[0])\n model = elems[1]\n assert model == \"PINHOLE\", \"While the loader support other types, the rest of the code assumes PINHOLE\"\n width = int(elems[2])\n height = int(elems[3])\n params = np.array(tuple(map(float, elems[4:])))\n cameras[camera_id] = Camera(id=camera_id, model=model,\n width=width, height=height,\n params=params)\n return cameras" }, { "identifier": "qvec2rotmat", "path": "scene/colmap_loader.py", "snippet": "def qvec2rotmat(qvec):\n return np.array([\n [1 - 2 * qvec[2]**2 - 2 * qvec[3]**2,\n 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],\n 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]],\n [2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],\n 1 - 2 * qvec[1]**2 - 2 * qvec[3]**2,\n 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]],\n [2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],\n 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],\n 1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]])" }, { "identifier": "read_extrinsics_binary", "path": "scene/colmap_loader.py", "snippet": "def read_extrinsics_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadImagesBinary(const std::string& path)\n void Reconstruction::WriteImagesBinary(const std::string& path)\n \"\"\"\n images = {}\n with open(path_to_model_file, \"rb\") as fid:\n num_reg_images = read_next_bytes(fid, 8, \"Q\")[0]\n for _ in range(num_reg_images):\n binary_image_properties = read_next_bytes(\n fid, num_bytes=64, format_char_sequence=\"idddddddi\")\n image_id = binary_image_properties[0]\n qvec = np.array(binary_image_properties[1:5])\n tvec = np.array(binary_image_properties[5:8])\n camera_id = binary_image_properties[8]\n image_name = \"\"\n current_char = read_next_bytes(fid, 1, \"c\")[0]\n while current_char != b\"\\x00\": # look for the ASCII 0 entry\n image_name += current_char.decode(\"utf-8\")\n current_char = read_next_bytes(fid, 1, \"c\")[0]\n num_points2D = read_next_bytes(fid, num_bytes=8,\n format_char_sequence=\"Q\")[0]\n x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,\n format_char_sequence=\"ddq\"*num_points2D)\n xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),\n tuple(map(float, x_y_id_s[1::3]))])\n 
point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))\n images[image_id] = Image(\n id=image_id, qvec=qvec, tvec=tvec,\n camera_id=camera_id, name=image_name,\n xys=xys, point3D_ids=point3D_ids)\n return images" }, { "identifier": "read_intrinsics_binary", "path": "scene/colmap_loader.py", "snippet": "def read_intrinsics_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::WriteCamerasBinary(const std::string& path)\n void Reconstruction::ReadCamerasBinary(const std::string& path)\n \"\"\"\n cameras = {}\n with open(path_to_model_file, \"rb\") as fid:\n num_cameras = read_next_bytes(fid, 8, \"Q\")[0]\n for _ in range(num_cameras):\n camera_properties = read_next_bytes(\n fid, num_bytes=24, format_char_sequence=\"iiQQ\")\n camera_id = camera_properties[0]\n model_id = camera_properties[1]\n model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name\n width = camera_properties[2]\n height = camera_properties[3]\n num_params = CAMERA_MODEL_IDS[model_id].num_params\n params = read_next_bytes(fid, num_bytes=8*num_params,\n format_char_sequence=\"d\"*num_params)\n cameras[camera_id] = Camera(id=camera_id,\n model=model_name,\n width=width,\n height=height,\n params=np.array(params))\n assert len(cameras) == num_cameras\n return cameras" }, { "identifier": "read_points3D_binary", "path": "scene/colmap_loader.py", "snippet": "def read_points3D_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadPoints3DBinary(const std::string& path)\n void Reconstruction::WritePoints3DBinary(const std::string& path)\n \"\"\"\n\n\n with open(path_to_model_file, \"rb\") as fid:\n num_points = read_next_bytes(fid, 8, \"Q\")[0]\n\n xyzs = np.empty((num_points, 3))\n rgbs = np.empty((num_points, 3))\n errors = np.empty((num_points, 1))\n\n for p_id in range(num_points):\n binary_point_line_properties = read_next_bytes(\n fid, num_bytes=43, format_char_sequence=\"QdddBBBd\")\n xyz = np.array(binary_point_line_properties[1:4])\n rgb = np.array(binary_point_line_properties[4:7])\n error = np.array(binary_point_line_properties[7])\n track_length = read_next_bytes(\n fid, num_bytes=8, format_char_sequence=\"Q\")[0]\n track_elems = read_next_bytes(\n fid, num_bytes=8*track_length,\n format_char_sequence=\"ii\"*track_length)\n xyzs[p_id] = xyz\n rgbs[p_id] = rgb\n errors[p_id] = error\n return xyzs, rgbs, errors" }, { "identifier": "read_points3D_text", "path": "scene/colmap_loader.py", "snippet": "def read_points3D_text(path):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadPoints3DText(const std::string& path)\n void Reconstruction::WritePoints3DText(const std::string& path)\n \"\"\"\n xyzs = None\n rgbs = None\n errors = None\n num_points = 0\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n num_points += 1\n\n\n xyzs = np.empty((num_points, 3))\n rgbs = np.empty((num_points, 3))\n errors = np.empty((num_points, 1))\n count = 0\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n xyz = np.array(tuple(map(float, elems[1:4])))\n rgb = np.array(tuple(map(int, elems[4:7])))\n error = np.array(float(elems[7]))\n xyzs[count] = xyz\n rgbs[count] = rgb\n errors[count] = error\n count += 1\n\n return xyzs, rgbs, errors" }, { "identifier": "getWorld2View2", "path": 
"utils/graphics_utils.py", "snippet": "def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):\n Rt = np.zeros((4, 4))\n Rt[:3, :3] = R.transpose()\n Rt[:3, 3] = t\n Rt[3, 3] = 1.0\n\n C2W = np.linalg.inv(Rt)\n cam_center = C2W[:3, 3]\n cam_center = (cam_center + translate) * scale\n C2W[:3, 3] = cam_center\n Rt = np.linalg.inv(C2W)\n return np.float32(Rt)" }, { "identifier": "focal2fov", "path": "utils/graphics_utils.py", "snippet": "def focal2fov(focal, pixels):\n return 2*math.atan(pixels/(2*focal))" }, { "identifier": "fov2focal", "path": "utils/graphics_utils.py", "snippet": "def fov2focal(fov, pixels):\n return pixels / (2 * math.tan(fov / 2))" }, { "identifier": "SH2RGB", "path": "utils/sh_utils.py", "snippet": "def SH2RGB(sh):\n return sh * C0 + 0.5" }, { "identifier": "BasicPointCloud", "path": "scene/gaussian_model.py", "snippet": "class GaussianModel:\n def setup_functions(self):\n def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation, transform):\n def __init__(self, sh_degree : int, smpl_type : str, motion_offset_flag : bool, actor_gender: str):\n def capture(self):\n def restore(self, model_args, training_args):\n def get_scaling(self):\n def get_rotation(self):\n def get_xyz(self):\n def get_features(self):\n def get_opacity(self):\n def get_covariance(self, scaling_modifier = 1, transform=None):\n def oneupSHdegree(self):\n def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float):\n def training_setup(self, training_args):\n def update_learning_rate(self, iteration):\n def construct_list_of_attributes(self):\n def save_ply(self, path):\n def reset_opacity(self):\n def load_ply(self, path):\n def replace_tensor_to_optimizer(self, tensor, name):\n def _prune_optimizer(self, mask):\n def prune_points(self, mask):\n def cat_tensors_to_optimizer(self, tensors_dict):\n def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation):\n def densify_and_split(self, grads, grad_threshold, scene_extent, N=2):\n def densify_and_clone(self, grads, grad_threshold, scene_extent):\n def kl_densify_and_clone(self, grads, grad_threshold, scene_extent, kl_threshold=0.4):\n def kl_densify_and_split(self, grads, grad_threshold, scene_extent, kl_threshold=0.4, N=2):\n def kl_merge(self, grads, grad_threshold, scene_extent, kl_threshold=0.1):\n def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size, kl_threshold=0.4, t_vertices=None, iter=None):\n def kl_div(self, mu_0, rotation_0_q, scaling_0_diag, mu_1, rotation_1_q, scaling_1_diag):\n def add_densification_stats(self, viewspace_point_tensor, update_filter):\n def coarse_deform_c2source(self, query_pts, params, t_params, t_vertices, lbs_weights=None, correct_Rs=None, return_transl=False):\ndef read_pickle(pkl_path):\ndef SMPL_to_tensor(params, device):\ndef batch_rodrigues_torch(poses):\ndef get_rigid_transformation_torch(rot_mats, joints, parents):\ndef get_transform_params_torch(smpl, params, rot_mats=None, correct_Rs=None):\ndef batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):\n L = build_scaling_rotation(scaling_modifier * scaling, rotation)\n L_0 = rotation_0 @ scaling_0\n A = torch.matmul(bweights, A.reshape(bs, joints_num, -1))\n A = torch.reshape(A, (bs, -1, 4, 4))\n A = torch.matmul(bweights, self.s_A.reshape(bs, joints_num, -1))\n A = torch.reshape(A, (bs, -1, 4, 4))\n K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1)\n K = K.reshape([batch_size, 3, 3])\n A = 
get_rigid_transformation_torch(rot_mats, joints, parents)\n R = params['R'] \n K = torch.zeros((batch_size, 3, 3), dtype=dtype, device=device)\n K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \\\n .view((batch_size, 3, 3))" }, { "identifier": "SMPL", "path": "smpl/smpl_numpy.py", "snippet": "class SMPL():\n def __init__(self, sex, model_dir):\n super(SMPL, self).__init__()\n\n model_paths = {\n 'male': os.path.join(model_dir, MALE_PATH),\n 'female': os.path.join(model_dir, FEMALE_PATH),\n # 'neutral': os.path.join(model_dir, NEUTRAL_PATH)\n 'neutral': os.path.join('assets/SMPL_NEUTRAL.pkl')\n }\n\n with open(model_paths[sex], 'rb') as f:\n smpl_model = pickle.load(f, encoding='latin1')\n self.J_regressor = np.array(smpl_model['J_regressor'].todense()) # (24, 6890)\n self.weights = smpl_model['weights'] # (6890, 24)\n self.posedirs = smpl_model['posedirs'] # (6890, 3, 207)\n self.v_template = smpl_model['v_template'] # (6890, 3)\n self.shapedirs = np.array(smpl_model['shapedirs']) # (6890, 3, 10)\n self.faces = smpl_model['f'].astype('int32') # (13776, 3)\n self.kintree_table = smpl_model['kintree_table'].astype('int64') # (2, 24)\n\n id_to_col = {self.kintree_table[1, i].item(): i for i in range(self.kintree_table.shape[1])}\n self.parent = np.array([id_to_col[self.kintree_table[0, it]] for it in range(1, self.kintree_table.shape[1])])\n\n self.pose_shape = [24, 3]\n self.beta_shape = [10]\n self.pose = np.zeros(self.pose_shape)\n self.beta = np.zeros(self.beta_shape)\n\n self.verts = None\n self.J = None\n self.R = None\n\n def __call__(self, pose, beta):\n\n v_template = self.v_template # (6890, 3)\n shapedirs = self.shapedirs.reshape(-1,10) # (6890*3, 10)\n beta = beta[:, None] # (10, 1)\n\n v_shaped = shapedirs.dot(beta).reshape(6890, 3) + v_template # (6890, 3)\n J = self.J_regressor.dot(v_shaped) # (24, 3)\n\n # input is a rotation matrix: (24,3,3)\n if pose.shape == (24, 3, 3):\n R = pose\n # input is a rotation axis-angle vector: (1, 72), (72, 1) or (72, )\n elif pose.shape == (1, 72) or pose.shape == (72, 1) or pose.shape == (72,):\n pose_vectors = pose.reshape(-1, 3) # (24, 3)\n R = np.array([rodrigues(pose_vectors[p_idx])[0] \n for p_idx in range(pose_vectors.shape[0])\n ], \n dtype='float32') # (24, 3, 3)\n else:\n raise ValueError(\"Unsupported Pose Inputs - the Pose Shape is {}\".format(pose.shape))\n\n Is = np.eye(3, dtype='float32')[None, :] # (1, 3, 3)\n lrotmin = (R[1:,:] - Is).reshape(-1, 1) # (23x3x3, 1)\n posedirs = self.posedirs.reshape(-1,207) # (6890x3, 207)\n v_posed = v_shaped + posedirs.dot(lrotmin).reshape(6890, 3) # (6890, 3)\n\n J_ = J.copy()\n J_[1:, :] = J[1:, :] - J[self.parent, :] # (24, 3)\n G_ = np.concatenate([R, J_[:, :, None]], axis=-1) # (24, 3, 4)\n pad_rows = np.array([[0, 0, 0, 1]], dtype='float32')\n pad_rows = np.repeat(pad_rows, 24, axis=0).reshape(-1, 1, 4)\n G_ = np.concatenate([G_, pad_rows], axis=1) # (24, 4, 4)\n\n G = [G_[0].copy()]\n for i in range(1, 24):\n G.append(G[self.parent[i-1]].dot(G_[i, :, :]))\n G = np.stack(G, axis=0) # (24, 4, 4)\n\n joints = G[:, :3, 3]\n rest_joints = np.concatenate([J, np.zeros((24, 1))], axis=-1)[:, :, None] # (24, 4, 1)\n zeros = np.zeros((24, 4, 3), dtype='float32') # (24, 4, 3)\n rest_joints_mtx = np.concatenate([zeros, rest_joints], axis=-1) # (24, 4, 4) \n # print(\"G1: \", G[0], \"rest_joints_mtx1: \", rest_joints_mtx[0])\n posed_joints_mtx = np.matmul(G, rest_joints_mtx)\n # print(\"rest_joints_mtx2: \", posed_joints_mtx[0])\n G = G - posed_joints_mtx\n # print(G[0]) \n 
rest_shape_h = np.concatenate([v_posed, np.ones(v_posed.shape[0])[:, None]], axis=-1) #(6890, 4)\n T = self.weights.dot(G.reshape(24, -1)).reshape(6890, 4, 4)\n v = np.matmul(T, rest_shape_h[:, :, None])[:, :3, 0]\n \n return v, joints" }, { "identifier": "SMPLX", "path": "smplx/body_models.py", "snippet": "class SMPLX(SMPLH):\n '''\n SMPL-X (SMPL eXpressive) is a unified body model, with shape parameters\n trained jointly for the face, hands and body.\n SMPL-X uses standard vertex based linear blend skinning with learned\n corrective blend shapes, has N=10475 vertices and K=54 joints,\n which includes joints for the neck, jaw, eyeballs and fingers.\n '''\n\n NUM_BODY_JOINTS = SMPLH.NUM_BODY_JOINTS\n NUM_HAND_JOINTS = 15\n NUM_FACE_JOINTS = 3\n NUM_JOINTS = NUM_BODY_JOINTS + 2 * NUM_HAND_JOINTS + NUM_FACE_JOINTS\n EXPRESSION_SPACE_DIM = 100\n NECK_IDX = 12\n\n def __init__(\n self, model_path: str,\n kid_template_path: str = '',\n num_expression_coeffs: int = 10,\n create_expression: bool = True,\n expression: Optional[Tensor] = None,\n create_jaw_pose: bool = True,\n jaw_pose: Optional[Tensor] = None,\n create_leye_pose: bool = True,\n leye_pose: Optional[Tensor] = None,\n create_reye_pose=True,\n reye_pose: Optional[Tensor] = None,\n use_face_contour: bool = False,\n batch_size: int = 1,\n gender: str = 'neutral',\n age: str = 'adult',\n dtype=torch.float32,\n ext: str = 'npz',\n **kwargs\n ) -> None:\n ''' SMPLX model constructor\n\n Parameters\n ----------\n model_path: str\n The path to the folder or to the file where the model\n parameters are stored\n num_expression_coeffs: int, optional\n Number of expression components to use\n (default = 10).\n create_expression: bool, optional\n Flag for creating a member variable for the expression space\n (default = True).\n expression: torch.tensor, optional, Bx10\n The default value for the expression member variable.\n (default = None)\n create_jaw_pose: bool, optional\n Flag for creating a member variable for the jaw pose.\n (default = False)\n jaw_pose: torch.tensor, optional, Bx3\n The default value for the jaw pose variable.\n (default = None)\n create_leye_pose: bool, optional\n Flag for creating a member variable for the left eye pose.\n (default = False)\n leye_pose: torch.tensor, optional, Bx10\n The default value for the left eye pose variable.\n (default = None)\n create_reye_pose: bool, optional\n Flag for creating a member variable for the right eye pose.\n (default = False)\n reye_pose: torch.tensor, optional, Bx10\n The default value for the right eye pose variable.\n (default = None)\n use_face_contour: bool, optional\n Whether to compute the keypoints that form the facial contour\n batch_size: int, optional\n The batch size used for creating the member variables\n gender: str, optional\n Which gender to load\n dtype: torch.dtype\n The data type for the created variables\n '''\n\n # Load the model\n if osp.isdir(model_path):\n model_fn = 'SMPLX_{}.{ext}'.format(gender.upper(), ext=ext)\n smplx_path = os.path.join(model_path, model_fn)\n else:\n smplx_path = model_path\n assert osp.exists(smplx_path), 'Path {} does not exist!'.format(\n smplx_path)\n\n if ext == 'pkl':\n with open(smplx_path, 'rb') as smplx_file:\n model_data = pickle.load(smplx_file, encoding='latin1')\n elif ext == 'npz':\n model_data = np.load(smplx_path, allow_pickle=True)\n else:\n raise ValueError('Unknown extension: {}'.format(ext))\n\n data_struct = Struct(**model_data)\n\n super(SMPLX, self).__init__(\n model_path=model_path,\n 
kid_template_path=kid_template_path,\n data_struct=data_struct,\n dtype=dtype,\n batch_size=batch_size,\n vertex_ids=VERTEX_IDS['smplx'],\n gender=gender, age=age, ext=ext,\n **kwargs)\n\n lmk_faces_idx = data_struct.lmk_faces_idx\n self.register_buffer('lmk_faces_idx',\n torch.tensor(lmk_faces_idx, dtype=torch.long))\n lmk_bary_coords = data_struct.lmk_bary_coords\n self.register_buffer('lmk_bary_coords',\n torch.tensor(lmk_bary_coords, dtype=dtype))\n\n self.use_face_contour = use_face_contour\n if self.use_face_contour:\n dynamic_lmk_faces_idx = data_struct.dynamic_lmk_faces_idx\n dynamic_lmk_faces_idx = torch.tensor(\n dynamic_lmk_faces_idx,\n dtype=torch.long)\n self.register_buffer('dynamic_lmk_faces_idx',\n dynamic_lmk_faces_idx)\n\n dynamic_lmk_bary_coords = data_struct.dynamic_lmk_bary_coords\n dynamic_lmk_bary_coords = torch.tensor(\n dynamic_lmk_bary_coords, dtype=dtype)\n self.register_buffer('dynamic_lmk_bary_coords',\n dynamic_lmk_bary_coords)\n\n neck_kin_chain = find_joint_kin_chain(self.NECK_IDX, self.parents)\n self.register_buffer(\n 'neck_kin_chain',\n torch.tensor(neck_kin_chain, dtype=torch.long))\n\n if create_jaw_pose:\n if jaw_pose is None:\n default_jaw_pose = torch.zeros([batch_size, 3], dtype=dtype)\n else:\n default_jaw_pose = torch.tensor(jaw_pose, dtype=dtype)\n jaw_pose_param = nn.Parameter(default_jaw_pose,\n requires_grad=True)\n self.register_parameter('jaw_pose', jaw_pose_param)\n\n if create_leye_pose:\n if leye_pose is None:\n default_leye_pose = torch.zeros([batch_size, 3], dtype=dtype)\n else:\n default_leye_pose = torch.tensor(leye_pose, dtype=dtype)\n leye_pose_param = nn.Parameter(default_leye_pose,\n requires_grad=True)\n self.register_parameter('leye_pose', leye_pose_param)\n\n if create_reye_pose:\n if reye_pose is None:\n default_reye_pose = torch.zeros([batch_size, 3], dtype=dtype)\n else:\n default_reye_pose = torch.tensor(reye_pose, dtype=dtype)\n reye_pose_param = nn.Parameter(default_reye_pose,\n requires_grad=True)\n self.register_parameter('reye_pose', reye_pose_param)\n\n shapedirs = data_struct.shapedirs\n if len(shapedirs.shape) < 3:\n shapedirs = shapedirs[:, :, None]\n if (shapedirs.shape[-1] < self.SHAPE_SPACE_DIM +\n self.EXPRESSION_SPACE_DIM):\n print(f'WARNING: You are using a {self.name()} model, with only'\n ' 10 shape and 10 expression coefficients.')\n expr_start_idx = 10\n expr_end_idx = 20\n num_expression_coeffs = min(num_expression_coeffs, 10)\n else:\n expr_start_idx = self.SHAPE_SPACE_DIM\n expr_end_idx = self.SHAPE_SPACE_DIM + num_expression_coeffs\n num_expression_coeffs = min(\n num_expression_coeffs, self.EXPRESSION_SPACE_DIM)\n\n self._num_expression_coeffs = num_expression_coeffs\n\n expr_dirs = shapedirs[:, :, expr_start_idx:expr_end_idx]\n self.register_buffer(\n 'expr_dirs', to_tensor(to_np(expr_dirs), dtype=dtype))\n\n if create_expression:\n if expression is None:\n default_expression = torch.zeros(\n [batch_size, self.num_expression_coeffs], dtype=dtype)\n else:\n default_expression = torch.tensor(expression, dtype=dtype)\n expression_param = nn.Parameter(default_expression,\n requires_grad=True)\n self.register_parameter('expression', expression_param)\n\n def name(self) -> str:\n return 'SMPL-X'\n\n @property\n def num_expression_coeffs(self):\n return self._num_expression_coeffs\n\n def create_mean_pose(self, data_struct, flat_hand_mean=False):\n # Create the array for the mean pose. 
If flat_hand is false, then use\n # the mean that is given by the data, rather than the flat open hand\n global_orient_mean = torch.zeros([3], dtype=self.dtype)\n body_pose_mean = torch.zeros([self.NUM_BODY_JOINTS * 3],\n dtype=self.dtype)\n jaw_pose_mean = torch.zeros([3], dtype=self.dtype)\n leye_pose_mean = torch.zeros([3], dtype=self.dtype)\n reye_pose_mean = torch.zeros([3], dtype=self.dtype)\n # pose_mean = np.concatenate([global_orient_mean, body_pose_mean, jaw_pose_mean, leye_pose_mean, reye_pose_mean, self.left_hand_mean, self.right_hand_mean], axis=0)\n pose_mean = torch.cat([global_orient_mean, body_pose_mean, jaw_pose_mean, leye_pose_mean, reye_pose_mean, self.left_hand_mean, self.right_hand_mean], 0)\n\n return pose_mean\n\n def extra_repr(self):\n msg = super(SMPLX, self).extra_repr()\n msg = [\n msg,\n f'Number of Expression Coefficients: {self.num_expression_coeffs}'\n ]\n return '\\n'.join(msg)\n\n def forward(\n self,\n betas: Optional[Tensor] = None,\n global_orient: Optional[Tensor] = None,\n body_pose: Optional[Tensor] = None,\n left_hand_pose: Optional[Tensor] = None,\n right_hand_pose: Optional[Tensor] = None,\n transl: Optional[Tensor] = None,\n expression: Optional[Tensor] = None,\n jaw_pose: Optional[Tensor] = None,\n leye_pose: Optional[Tensor] = None,\n reye_pose: Optional[Tensor] = None,\n return_verts: bool = True,\n return_full_pose: bool = False,\n pose2rot: bool = True,\n return_shaped: bool = True,\n **kwargs\n ) -> TensorOutput:\n '''\n Forward pass for the SMPLX model\n\n Parameters\n ----------\n global_orient: torch.tensor, optional, shape Bx3\n If given, ignore the member variable and use it as the global\n rotation of the body. Useful if someone wishes to predicts this\n with an external model. (default=None)\n betas: torch.tensor, optional, shape BxN_b\n If given, ignore the member variable `betas` and use it\n instead. For example, it can used if shape parameters\n `betas` are predicted from some external model.\n (default=None)\n expression: torch.tensor, optional, shape BxN_e\n If given, ignore the member variable `expression` and use it\n instead. For example, it can used if expression parameters\n `expression` are predicted from some external model.\n body_pose: torch.tensor, optional, shape Bx(J*3)\n If given, ignore the member variable `body_pose` and use it\n instead. For example, it can used if someone predicts the\n pose of the body joints are predicted from some external model.\n It should be a tensor that contains joint rotations in\n axis-angle format. (default=None)\n left_hand_pose: torch.tensor, optional, shape BxP\n If given, ignore the member variable `left_hand_pose` and\n use this instead. It should either contain PCA coefficients or\n joint rotations in axis-angle format.\n right_hand_pose: torch.tensor, optional, shape BxP\n If given, ignore the member variable `right_hand_pose` and\n use this instead. It should either contain PCA coefficients or\n joint rotations in axis-angle format.\n jaw_pose: torch.tensor, optional, shape Bx3\n If given, ignore the member variable `jaw_pose` and\n use this instead. It should either joint rotations in\n axis-angle format.\n transl: torch.tensor, optional, shape Bx3\n If given, ignore the member variable `transl` and use it\n instead. For example, it can used if the translation\n `transl` is predicted from some external model.\n (default=None)\n return_verts: bool, optional\n Return the vertices. 
(default=True)\n return_full_pose: bool, optional\n Returns the full axis-angle pose vector (default=False)\n\n Returns\n -------\n output: ModelOutput\n A named tuple of type `ModelOutput`\n '''\n\n # If no shape and pose parameters are passed along, then use the\n # ones from the module\n global_orient = (global_orient if global_orient is not None else\n self.global_orient)\n body_pose = body_pose if body_pose is not None else self.body_pose\n betas = betas if betas is not None else self.betas\n\n left_hand_pose = (left_hand_pose if left_hand_pose is not None else\n self.left_hand_pose)\n right_hand_pose = (right_hand_pose if right_hand_pose is not None else\n self.right_hand_pose)\n jaw_pose = jaw_pose if jaw_pose is not None else self.jaw_pose\n leye_pose = leye_pose if leye_pose is not None else self.leye_pose\n reye_pose = reye_pose if reye_pose is not None else self.reye_pose\n expression = expression if expression is not None else self.expression\n\n apply_trans = transl is not None or hasattr(self, 'transl')\n if transl is None:\n if hasattr(self, 'transl'):\n transl = self.transl\n\n if self.use_pca:\n left_hand_pose = torch.einsum(\n 'bi,ij->bj', [left_hand_pose, self.left_hand_components])\n right_hand_pose = torch.einsum(\n 'bi,ij->bj', [right_hand_pose, self.right_hand_components])\n\n full_pose = torch.cat([global_orient.reshape(-1, 1, 3),\n body_pose.reshape(-1, self.NUM_BODY_JOINTS, 3),\n jaw_pose.reshape(-1, 1, 3),\n leye_pose.reshape(-1, 1, 3),\n reye_pose.reshape(-1, 1, 3),\n left_hand_pose.reshape(-1, 15, 3),\n right_hand_pose.reshape(-1, 15, 3)],\n dim=1).reshape(-1, 165).to(self.pose_mean.device)\n\n # Add the mean pose of the model. Does not affect the body, only the\n # hands when flat_hand_mean == False\n full_pose += self.pose_mean\n\n batch_size = max(betas.shape[0], global_orient.shape[0],\n body_pose.shape[0])\n # Concatenate the shape and expression coefficients\n scale = int(batch_size / betas.shape[0])\n if scale > 1:\n betas = betas.expand(scale, -1)\n shape_components = torch.cat([betas, expression], dim=-1).to(self.pose_mean.device)\n\n shapedirs = torch.cat([self.shapedirs, self.expr_dirs], dim=-1)\n\n vertices, joints, A, T = lbs(shape_components, full_pose, self.v_template,\n shapedirs, self.posedirs,\n self.J_regressor, self.parents,\n self.lbs_weights, pose2rot=pose2rot,\n )\n\n lmk_faces_idx = self.lmk_faces_idx.unsqueeze(\n dim=0).expand(batch_size, -1).contiguous()\n lmk_bary_coords = self.lmk_bary_coords.unsqueeze(dim=0).repeat(\n self.batch_size, 1, 1)\n if self.use_face_contour:\n lmk_idx_and_bcoords = find_dynamic_lmk_idx_and_bcoords(\n vertices, full_pose, self.dynamic_lmk_faces_idx,\n self.dynamic_lmk_bary_coords,\n self.neck_kin_chain,\n pose2rot=True,\n )\n dyn_lmk_faces_idx, dyn_lmk_bary_coords = lmk_idx_and_bcoords\n\n lmk_faces_idx = torch.cat([lmk_faces_idx,\n dyn_lmk_faces_idx], 1)\n lmk_bary_coords = torch.cat(\n [lmk_bary_coords.expand(batch_size, -1, -1),\n dyn_lmk_bary_coords], 1)\n\n landmarks = vertices2landmarks(vertices, self.faces_tensor,\n lmk_faces_idx,\n lmk_bary_coords)\n\n # import matplotlib.pyplot as plt\n # import numpy as np\n # xs = joints[0,:,0]\n # ys = joints[0,:,1]\n # plt.scatter(xs, ys)\n\n # # zip joins x and y coordinates in pairs\n # count = 0\n # for x,y in zip(xs, ys):\n\n # label = \"{:.2f}\".format(count)\n\n # plt.annotate(label, # this is the text\n # (x,y), # these are the coordinates to position the label\n # textcoords=\"offset points\", # how to position the text\n # xytext=(0,10), # distance 
from text to points (x,y)\n # ha='center') # horizontal alignment can be left, right or center\n # count += 1\n # plt.savefig(\"joints.png\")\n # import pdb; pdb.set_trace()\n\n # Add any extra joints that might be needed\n joints = self.vertex_joint_selector(vertices, joints)\n # Add the landmarks to the joints\n joints = torch.cat([joints, landmarks], dim=1)\n # Map the joints to the current dataset\n\n if self.joint_mapper is not None:\n joints = self.joint_mapper(joints=joints, vertices=vertices)\n\n if apply_trans:\n joints += transl.unsqueeze(dim=1)\n vertices += transl.unsqueeze(dim=1)\n # clone because we are modifying them in-place\n A = A.clone()\n A[..., :3, 3] += transl.unsqueeze(dim=1)\n T = T.clone()\n T[..., :3, 3] += transl.unsqueeze(dim=1)\n\n v_shaped = None\n if return_shaped:\n v_shaped = self.v_template + blend_shapes(betas, self.shapedirs)\n else:\n v_shaped = Tensor(0)\n\n output = TensorOutput(vertices=vertices if return_verts else None,\n joints=joints,\n betas=betas,\n expression=expression,\n global_orient=global_orient,\n body_pose=body_pose,\n left_hand_pose=left_hand_pose,\n right_hand_pose=right_hand_pose,\n jaw_pose=jaw_pose,\n v_shaped=v_shaped,\n full_pose=full_pose if return_full_pose else None,\n A=A,\n T=T,\n f=self.faces)\n return output" }, { "identifier": "SMCReader", "path": "data/dna_rendering/dna_rendering_sample_code/SMCReader.py", "snippet": "class SMCReader:\n\n def __init__(self, file_path):\n \"\"\"Read SenseMocapFile endswith \".smc\".\n\n Args:\n file_path (str):\n Path to an SMC file.\n body_model (nn.Module or dict):\n Only needed for SMPL transformation to device frame\n if nn.Module: a body_model instance\n if dict: a body_model config\n \"\"\"\n self.smc = h5py.File(file_path, 'r')\n self.__calibration_dict__ = None\n self.__kinect_calib_dict__ = None \n self.__available_keys__ = list(self.smc.keys())\n \n self.actor_info = None \n if hasattr(self.smc, 'attrs') and len(self.smc.attrs.keys()) > 0:\n self.actor_info = dict(\n id=self.smc.attrs['actor_id'],\n perf_id=self.smc.attrs['performance_id'],\n age=self.smc.attrs['age'],\n gender=self.smc.attrs['gender'],\n height=self.smc.attrs['height'],\n weight=self.smc.attrs['weight'],\n ethnicity=self.smc.attrs['ethnicity'],\n )\n\n self.Camera_5mp_info = None \n if 'Camera_5mp' in self.smc:\n self.Camera_5mp_info = dict(\n num_device=self.smc['Camera_5mp'].attrs['num_device'],\n num_frame=self.smc['Camera_5mp'].attrs['num_frame'],\n resolution=self.smc['Camera_5mp'].attrs['resolution'],\n )\n self.Camera_12mp_info = None \n if 'Camera_12mp' in self.smc:\n self.Camera_12mp_info = dict(\n num_device=self.smc['Camera_12mp'].attrs['num_device'],\n num_frame=self.smc['Camera_12mp'].attrs['num_frame'],\n resolution=self.smc['Camera_12mp'].attrs['resolution'],\n )\n self.Kinect_info = None\n if 'Kinect' in self.smc:\n self.Kinect_info=dict(\n num_device=self.smc['Kinect'].attrs['num_device'],\n num_frame=self.smc['Kinect'].attrs['num_frame'],\n resolution=self.smc['Kinect'].attrs['resolution'],\n )\n\n def get_available_keys(self):\n return self.__available_keys__ \n\n def get_actor_info(self):\n return self.actor_info\n \n def get_Camera_12mp_info(self):\n return self.Camera_12mp_info\n\n def get_Camera_5mp_info(self):\n return self.Camera_5mp_info\n \n def get_Kinect_info(self):\n return self.Kinect_info\n \n ### RGB Camera Calibration\n def get_Calibration_all(self):\n \"\"\"Get calibration matrix of all cameras and save it in self\n \n Args:\n None\n\n Returns:\n Dictionary of calibration 
matrixs of all matrixs.\n dict( \n Camera_Parameter: Camera_id : Matrix_type : value\n )\n Notice:\n Camera_id(str) in {'Camera_5mp': '0'~'47', 'Camera_12mp':'48'~'60'}\n Matrix_type in ['D', 'K', 'RT', 'Color_Calibration'] \n \"\"\" \n if not 'Camera_Parameter' in self.smc:\n print(\"=== no key: Camera_Parameter.\\nplease check available keys!\")\n return None \n\n if self.__calibration_dict__ is not None:\n return self.__calibration_dict__\n\n self.__calibration_dict__ = dict()\n for ci in self.smc['Camera_Parameter'].keys():\n self.__calibration_dict__.setdefault(ci,dict())\n for mt in ['D', 'K', 'RT', 'Color_Calibration'] :\n self.__calibration_dict__[ci][mt] = \\\n self.smc['Camera_Parameter'][ci][mt][()]\n return self.__calibration_dict__\n\n def get_Calibration(self, Camera_id):\n \"\"\"Get calibration matrixs of a certain camera by its type and id \n\n Args:\n Camera_id (int/str of a number):\n Camera_id(str) in {'Camera_5mp': '0'~'47', \n 'Camera_12mp':'48'~'60'}\n Returns:\n Dictionary of calibration matrixs.\n ['D', 'K', 'RT', 'Color_Calibration'] \n \"\"\"\n if not 'Camera_Parameter' in self.smc:\n print(\"=== no key: Camera_Parameter.\\nplease check available keys!\")\n return None \n\n rs = dict()\n for k in ['D', 'K', 'RT', 'Color_Calibration'] :\n rs[k] = self.smc['Camera_Parameter'][f'{int(Camera_id):02d}'][k][()]\n return rs\n\n ### Kinect Camera Calibration\n def get_Kinect_Calibration_all(self):\n \"\"\"Get calibration matrix of all kinect cameras and save it in self\n \n Args:\n None\n\n Returns:\n Dictionary of calibration matrixs of all matrixs.\n dict( \n Camera_group: Camera_id : Matrix_type : value\n )\n Notice:\n Camera_group(str) in ['Kinect']\n Camera_id(str) in {'Kinect': '0'~'7'}\n Matrix_type in ['D', 'K', 'RT'] \n \"\"\" \n if not 'Calibration' in self.smc:\n print(\"=== no key: Calibration.\\nplease check available keys!\")\n return None \n\n if self.__kinect_calib_dict__ is not None:\n return self.__kinect_calib_dict__\n\n self.__kinect_calib_dict__ = dict()\n for cg in ['Kinect']:\n self.__kinect_calib_dict__.setdefault(cg,dict())\n for ci in self.smc['Calibration'][cg].keys():\n self.__kinect_calib_dict__[cg].setdefault(ci,dict())\n for mt in ['D', 'K', 'RT'] :\n self.__kinect_calib_dict__[cg][ci][mt] = \\\n self.smc['Calibration'][cg][ci][mt][()]\n return self.__kinect_calib_dict__\n\n def get_kinect_Calibration(self, Camera_id):\n \"\"\"Get calibration matrixs of a certain kinect camera by its type and id \n\n Args:\n Camera_group (str):\n Camera_group in ['Kinect'].\n Camera_id (int/str of a number):\n CameraID(str) in {'Kinect': '0'~'7'}\n Returns:\n Dictionary of calibration matrixs.\n ['D', 'K', 'RT'] \n \"\"\" \n if not 'Calibration' in self.smc:\n print(\"=== no key: Calibration.\\nplease check available keys!\")\n return None \n\n Camera_id = f'{int(Camera_id):02d}'\n assert(Camera_id in self.smc['Calibration'][\"Kinect\"].keys())\n rs = dict()\n for k in ['D', 'K', 'RT']:\n rs[k] = self.smc['Calibration'][\"Kinect\"][Camera_id][k][()]\n return rs\n\n ### RGB image\n def __read_color_from_bytes__(self, color_array):\n \"\"\"Decode an RGB image from an encoded byte array.\"\"\"\n return cv2.imdecode(color_array, cv2.IMREAD_COLOR)\n\n def get_mask(self, Camera_id, Frame_id=None, disable_tqdm=True):\n \"\"\"Get mask from Camera_id, Frame_id\n\n Args:\n Camera_id (int/str of a number):\n Camera_id (str) in \n {'Camera_5mp': '0'~'47', \n 'Camera_12mp':'48'~'60',\n 'Kinect': '0'~'7'}\n Frame_id a.(int/str of a number): '0' ~ 'num_frame'\n b.list of 
numbers (int/str)\n c.None: get batch of all imgs in order of time sequence \n Returns:\n a single img :\n 'color': HWC in bgr (uint8)\n 'mask' : HW (uint8)\n 'depth': HW (uint16)\n \"\"\" \n if not 'Mask' in self.smc:\n print(\"=== no key: Mask.\\nplease check available keys!\")\n return None \n\n Camera_id = str(Camera_id)\n\n assert(isinstance(Frame_id,(list,int, str, type(None))))\n if isinstance(Frame_id, (str,int)):\n Frame_id = str(Frame_id)\n assert(Frame_id in self.smc['Mask'][Camera_id]['mask'].keys())\n img_byte = self.smc['Mask'][Camera_id]['mask'][Frame_id][()]\n img_color = self.__read_color_from_bytes__(img_byte)\n img_color = np.max(img_color,2)\n return img_color \n else:\n if Frame_id is None:\n Frame_id_list =sorted([int(l) for l in self.smc['Mask'][Camera_id]['mask'].keys()])\n elif isinstance(Frame_id, list):\n Frame_id_list = Frame_id\n rs = []\n for fi in tqdm.tqdm(Frame_id_list, disable=disable_tqdm):\n rs.append(self.get_mask(Camera_id,fi))\n return np.stack(rs,axis=0)\n\n def get_img(self, Camera_group, Camera_id, Image_type, Frame_id=None, disable_tqdm=True):\n \"\"\"Get image its Camera_group, Camera_id, Image_type and Frame_id\n\n Args:\n Camera_group (str):\n Camera_group in ['Camera_12mp', 'Camera_5mp','Kinect'].\n Camera_id (int/str of a number):\n CameraID (str) in \n {'Camera_5mp': '0'~'47', \n 'Camera_12mp':'48'~'60',\n 'Kinect': '0'~'7'}\n Image_type(str) in \n {'Camera_5mp': ['color'], \n 'Camera_12mp': ['color'],\n 'Kinect': ['depth', 'mask']}\n Frame_id a.(int/str of a number): '0' ~ 'num_frame'('149') \n b.list of numbers (int/str)\n c.None: get batch of all imgs in order of time sequence \n Returns:\n a single img :\n 'color': HWC in bgr (uint8)\n 'mask' : HW (uint8)\n 'depth': HW (uint16)\n \"\"\" \n if not Camera_group in self.smc:\n print(\"=== no key: %s.\\nplease check available keys!\" % Camera_group)\n return None\n\n assert(Camera_group in ['Camera_12mp', 'Camera_5mp','Kinect'])\n Camera_id = str(Camera_id)\n assert(Camera_id in self.smc[Camera_group].keys())\n assert(Image_type in self.smc[Camera_group][Camera_id].keys())\n assert(isinstance(Frame_id,(list,int, str, type(None))))\n if isinstance(Frame_id, (str,int)):\n Frame_id = str(Frame_id)\n assert(Frame_id in self.smc[Camera_group][Camera_id][Image_type].keys())\n if Image_type in ['color']:\n img_byte = self.smc[Camera_group][Camera_id][Image_type][Frame_id][()]\n img_color = self.__read_color_from_bytes__(img_byte)\n if Image_type == 'mask':\n img_byte = self.smc[Camera_group][Camera_id][Image_type][Frame_id][()]\n img_color = self.__read_color_from_bytes__(img_byte)\n img_color = np.max(img_color,2)\n if Image_type == 'depth':\n img_color = self.smc[Camera_group][Camera_id][Image_type][Frame_id][()]\n return img_color \n else:\n if Frame_id is None:\n Frame_id_list =sorted([int(l) for l in self.smc[Camera_group][Camera_id][Image_type].keys()])\n elif isinstance(Frame_id, list):\n Frame_id_list = Frame_id\n rs = []\n for fi in tqdm(Frame_id_list, disable=disable_tqdm):\n rs.append(self.get_img(Camera_group, Camera_id, Image_type,fi))\n return np.stack(rs,axis=0)\n \n ###Keypoints2d\n def get_Keypoints2d(self, Camera_id, Frame_id=None):\n \"\"\"Get keypoint2D by its Camera_group, Camera_id and Frame_id\n\n Args:\n Camera_id (int/str of a number):\n CameraID (str) in \n {'Camera_5mp': '0'~'47', \n 'Camera_12mp':'48'~'60',}\n Frame_id a.(int/str of a number): '0' ~ 'num_frame-1'('149') \n b.list of numbers (int/str)\n c.None: get batch of all imgs in order of time sequence \n Returns:\n 
a single img :\n 'color': HWC in bgr (uint8)\n 'mask' : HW (uint8)\n 'depth': HW (uint16)\n \"\"\" \n if not 'Keypoints_2D' in self.smc:\n print(\"=== no key: Keypoints_2D.\\nplease check available keys!\")\n return None \n\n Camera_id = f'{int(Camera_id):02d}'\n assert(isinstance(Frame_id,(list,int, str, type(None))))\n if isinstance(Frame_id, (str,int)):\n Frame_id = int(Frame_id)\n return self.smc['Keypoints_2D'][Camera_id][()][Frame_id,:]\n else:\n if Frame_id is None:\n return self.smc['Keypoints_2D'][Camera_id][()]\n elif isinstance(Frame_id, list):\n Frame_id_list = Frame_id\n rs = []\n for fi in tqdm.tqdm(Frame_id_list):\n rs.append(self.get_Keypoints2d(Camera_id,fi))\n return np.stack(rs,axis=0)\n\n ###Keypoints3d\n def get_Keypoints3d(self, Frame_id=None):\n \"\"\"Get keypoint3D Frame_id, TODO coordinate\n\n Args:\n Frame_id a.(int/str of a number): '0' ~ 'num_frame-1'('149') \n b.list of numbers (int/str)\n c.None: get batch of all imgs in order of time sequence \n Returns:\n Keypoints3d tensor: np.ndarray of shape ([N], ,3)\n \"\"\" \n if not 'Keypoints_3D' in self.smc:\n print(\"=== no key: Keypoints_3D.\\nplease check available keys!\")\n return None \n\n if isinstance(Frame_id, (str,int)):\n Frame_id = int(Frame_id)\n return self.smc['Keypoints_3D'][\"keypoints3d\"][Frame_id,:]\n else:\n if Frame_id is None:\n return self.smc['Keypoints_3D'][\"keypoints3d\"]\n elif isinstance(Frame_id, list):\n Frame_id_list = Frame_id\n rs = []\n for fi in tqdm.tqdm(Frame_id_list):\n rs.append(self.get_Keypoints3d(fi))\n return np.stack(rs,axis=0)\n\n ###SMPLx\n def get_SMPLx(self, Frame_id=None):\n \"\"\"Get SMPL (world coordinate) computed by mocap processing pipeline.\n\n Args:\n Frame_id (int, list or None, optional):\n int: frame id of one selected frame\n list: a list of frame id\n None: all frames will be returned\n Defaults to None.\n\n Returns:\n dict:\n 'global_orient': np.ndarray of shape (N, 3)\n 'body_pose': np.ndarray of shape (N, 21, 3)\n 'transl': np.ndarray of shape (N, 3)\n 'betas': np.ndarray of shape (1, 10)\n \"\"\"\n if not 'SMPLx' in self.smc:\n print(\"=== no key: SMPLx.\\nplease check available keys!\")\n return None \n\n t_frame = self.smc['SMPLx']['betas'][()].shape[0]\n if Frame_id is None:\n frame_list = range(t_frame)\n elif isinstance(Frame_id, list):\n frame_list = [int(fi) for fi in Frame_id]\n elif isinstance(Frame_id, (int,str)):\n Frame_id = int(Frame_id)\n assert Frame_id < t_frame,\\\n f'Invalid frame_index {Frame_id}'\n frame_list = Frame_id\n else:\n raise TypeError('frame_id should be int, list or None.')\n\n smpl_dict = {}\n for key in ['betas', 'expression', 'fullpose', 'transl']:\n smpl_dict[key] = self.smc['SMPLx'][key][()][frame_list, ...]\n smpl_dict['scale'] = self.smc['SMPLx']['scale'][()]\n\n return smpl_dict\n\n def release(self):\n self.smc = None \n self.__calibration_dict__ = None\n self.__kinect_calib_dict__ = None\n self.__available_keys__ = None\n self.actor_info = None \n self.Camera_5mp_info = None\n self.Camera_12mp_info = None \n self.Kinect_info = None" } ]
import os import sys import numpy as np import torch import json import imageio import cv2 import random from PIL import Image from typing import NamedTuple from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \ read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal from pathlib import Path from plyfile import PlyData, PlyElement from utils.sh_utils import SH2RGB from scene.gaussian_model import BasicPointCloud from smpl.smpl_numpy import SMPL from smplx.body_models import SMPLX from data.dna_rendering.dna_rendering_sample_code.SMCReader import SMCReader
14,389
# # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact [email protected] # class CameraInfo(NamedTuple): uid: int pose_id: int R: np.array T: np.array K: np.array FovY: np.array FovX: np.array image: np.array image_path: str image_name: str bkgd_mask: np.array bound_mask: np.array width: int height: int smpl_param: dict world_vertex: np.array world_bound: np.array big_pose_smpl_param: dict big_pose_world_vertex: np.array big_pose_world_bound: np.array class SceneInfo(NamedTuple): point_cloud: BasicPointCloud train_cameras: list test_cameras: list nerf_normalization: dict ply_path: str def getNerfppNorm(cam_info): def get_center_and_diag(cam_centers): cam_centers = np.hstack(cam_centers) avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True) center = avg_cam_center dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True) diagonal = np.max(dist) return center.flatten(), diagonal cam_centers = [] for cam in cam_info: W2C = getWorld2View2(cam.R, cam.T) C2W = np.linalg.inv(W2C) cam_centers.append(C2W[:3, 3:4]) center, diagonal = get_center_and_diag(cam_centers) radius = diagonal * 1.1 translate = -center return {"translate": translate, "radius": radius} def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder): cam_infos = [] for idx, key in enumerate(cam_extrinsics): sys.stdout.write('\r') # the exact output you're looking for: sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics))) sys.stdout.flush() extr = cam_extrinsics[key] intr = cam_intrinsics[extr.camera_id] height = intr.height width = intr.width uid = intr.id
# # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact [email protected] # class CameraInfo(NamedTuple): uid: int pose_id: int R: np.array T: np.array K: np.array FovY: np.array FovX: np.array image: np.array image_path: str image_name: str bkgd_mask: np.array bound_mask: np.array width: int height: int smpl_param: dict world_vertex: np.array world_bound: np.array big_pose_smpl_param: dict big_pose_world_vertex: np.array big_pose_world_bound: np.array class SceneInfo(NamedTuple): point_cloud: BasicPointCloud train_cameras: list test_cameras: list nerf_normalization: dict ply_path: str def getNerfppNorm(cam_info): def get_center_and_diag(cam_centers): cam_centers = np.hstack(cam_centers) avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True) center = avg_cam_center dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True) diagonal = np.max(dist) return center.flatten(), diagonal cam_centers = [] for cam in cam_info: W2C = getWorld2View2(cam.R, cam.T) C2W = np.linalg.inv(W2C) cam_centers.append(C2W[:3, 3:4]) center, diagonal = get_center_and_diag(cam_centers) radius = diagonal * 1.1 translate = -center return {"translate": translate, "radius": radius} def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder): cam_infos = [] for idx, key in enumerate(cam_extrinsics): sys.stdout.write('\r') # the exact output you're looking for: sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics))) sys.stdout.flush() extr = cam_extrinsics[key] intr = cam_intrinsics[extr.camera_id] height = intr.height width = intr.width uid = intr.id
R = np.transpose(qvec2rotmat(extr.qvec))
2
2023-11-29 07:10:39+00:00
16k
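Each record in this dump, like the one ending above, pairs a truncated source file and its import block with the single gold line that follows the truncation, which makes exact-match scoring of a completion model straightforward. The sketch below is an assumption about how such a dump might be consumed, not part of the dataset itself: the JSONL path, the field keys, and the `complete` callback are all hypothetical.

import json
from typing import Callable, Iterable


def iter_rows(path: str) -> Iterable[dict]:
    # One JSON object per line is assumed here; adjust if the dump is stored differently.
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            if line.strip():
                yield json.loads(line)


def exact_match(rows: Iterable[dict], complete: Callable[[str], str]) -> float:
    # Compare the first line a completion callback proposes against the stored gold next line.
    hits = total = 0
    for row in rows:
        prompt = row["import_statement"] + "\n" + row["cropped_code"]
        raw = complete(prompt).strip()
        first = raw.splitlines()[0].strip() if raw else ""
        hits += first == row["next_line"].strip()
        total += 1
    return hits / max(total, 1)


if __name__ == "__main__":
    # "next_line_completion.jsonl" is a placeholder path, and the lambda is a
    # trivial baseline that always predicts an empty continuation.
    print(exact_match(iter_rows("next_line_completion.jsonl"), lambda prompt: ""))

A real harness would likely normalize whitespace or allow fuzzy matching, but exact match on the single gold line is the most direct way to score records shaped like these.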
emdgroup/baybe
examples/Serialization/basic_serialization.py
[ { "identifier": "Campaign", "path": "baybe/campaign.py", "snippet": "class Campaign(SerialMixin):\n \"\"\"Main class for interaction with BayBE.\n\n Campaigns define and record an experimentation process, i.e. the execution of a\n series of measurements and the iterative sequence of events involved.\n\n In particular, a campaign:\n * Defines the objective of an experimentation process.\n * Defines the search space over which the experimental parameter may vary.\n * Defines a strategy for traversing the search space.\n * Records the measurement data collected during the process.\n * Records metadata about the progress of the experimentation process.\n \"\"\"\n\n # DOE specifications\n searchspace: SearchSpace = field()\n \"\"\"The search space in which the experiments are conducted.\"\"\"\n\n objective: Objective = field()\n \"\"\"The optimization objective.\"\"\"\n\n strategy: Strategy = field(factory=TwoPhaseStrategy)\n \"\"\"The employed strategy\"\"\"\n\n # Data\n measurements_exp: pd.DataFrame = field(factory=pd.DataFrame, eq=eq_dataframe)\n \"\"\"The experimental representation of the conducted experiments.\"\"\"\n\n numerical_measurements_must_be_within_tolerance: bool = field(default=True)\n \"\"\"Flag for forcing numerical measurements to be within tolerance.\"\"\"\n\n # Metadata\n n_batches_done: int = field(default=0)\n \"\"\"The number of already processed batches.\"\"\"\n\n n_fits_done: int = field(default=0)\n \"\"\"The number of fits already done.\"\"\"\n\n # Private\n _cached_recommendation: pd.DataFrame = field(factory=pd.DataFrame, eq=eq_dataframe)\n \"\"\"The cached recommendations.\"\"\"\n\n @property\n def parameters(self) -> List[Parameter]:\n \"\"\"The parameters of the underlying search space.\"\"\"\n return self.searchspace.parameters\n\n @property\n def targets(self) -> List[NumericalTarget]:\n \"\"\"The targets of the underlying objective.\"\"\"\n # TODO: Currently, the `Objective` class is directly coupled to\n # `NumericalTarget`, hence the return type.\n return self.objective.targets\n\n @property\n def measurements_parameters_comp(self) -> pd.DataFrame:\n \"\"\"The computational representation of the measured parameters.\"\"\"\n if len(self.measurements_exp) < 1:\n return pd.DataFrame()\n return self.searchspace.transform(self.measurements_exp)\n\n @property\n def measurements_targets_comp(self) -> pd.DataFrame:\n \"\"\"The computational representation of the measured targets.\"\"\"\n if len(self.measurements_exp) < 1:\n return pd.DataFrame()\n return self.objective.transform(self.measurements_exp)\n\n @classmethod\n def from_config(cls, config_json: str) -> Campaign:\n \"\"\"Create a campaign from a configuration JSON.\n\n Args:\n config_json: The string with the configuration JSON.\n\n Returns:\n The constructed campaign.\n \"\"\"\n config = json.loads(config_json)\n config[\"searchspace\"] = {\n \"parameters\": config.pop(\"parameters\"),\n \"constraints\": config.pop(\"constraints\", None),\n }\n return _config_converter.structure(config, Campaign)\n\n @classmethod\n def to_config(cls) -> str:\n \"\"\"Extract the configuration of the campaign as JSON string.\n\n Note: This is not yet implemented. 
Use\n :func:`baybe.utils.serialization.SerialMixin.to_json` instead\n\n Returns:\n The configuration as JSON string.\n\n Raises:\n NotImplementedError: When trying to use this function.\n \"\"\"\n # TODO: Ideally, this should extract a \"minimal\" configuration, that is,\n # default values should not be exported, which cattrs supports via the\n # 'omit_if_default' option. Can be Implemented once the converter structure\n # has been cleaned up.\n raise NotImplementedError()\n\n @classmethod\n def validate_config(cls, config_json: str) -> None:\n \"\"\"Validate a given campaign configuration JSON.\n\n Args:\n config_json: The JSON that should be validated.\n \"\"\"\n config = json.loads(config_json)\n config[\"searchspace\"] = {\n \"parameters\": config.pop(\"parameters\"),\n \"constraints\": config.pop(\"constraints\", None),\n }\n _validation_converter.structure(config, Campaign)\n\n def add_measurements(self, data: pd.DataFrame) -> None:\n \"\"\"Add results from a dataframe to the internal database.\n\n Each addition of data is considered a new batch. Added results are checked for\n validity. Categorical values need to have an exact match. For numerical values,\n a campaign flag determines if values that lie outside a specified tolerance\n are accepted.\n Note that this modifies the provided data in-place.\n\n Args:\n data: The data to be added (with filled values for targets). Preferably\n created via :func:`baybe.campaign.Campaign.recommend`.\n\n Raises:\n ValueError: If one of the targets has missing values or NaNs in the provided\n dataframe.\n TypeError: If the target has non-numeric entries in the provided dataframe.\n \"\"\"\n # Invalidate recommendation cache first (in case of uncaught exceptions below)\n self._cached_recommendation = pd.DataFrame()\n\n # Check if all targets have valid values\n for target in self.targets:\n if data[target.name].isna().any():\n raise ValueError(\n f\"The target '{target.name}' has missing values or NaNs in the \"\n f\"provided dataframe. Missing target values are not supported.\"\n )\n if data[target.name].dtype.kind not in \"iufb\":\n raise TypeError(\n f\"The target '{target.name}' has non-numeric entries in the \"\n f\"provided dataframe. Non-numeric target values are not supported.\"\n )\n\n # Check if all targets have valid values\n for param in self.parameters:\n if data[param.name].isna().any():\n raise ValueError(\n f\"The parameter '{param.name}' has missing values or NaNs in the \"\n f\"provided dataframe. 
Missing parameter values are not supported.\"\n )\n if param.is_numeric and (data[param.name].dtype.kind not in \"iufb\"):\n raise TypeError(\n f\"The numerical parameter '{param.name}' has non-numeric entries in\"\n f\" the provided dataframe.\"\n )\n\n # Update meta data\n # TODO: refactor responsibilities\n self.searchspace.discrete.mark_as_measured(\n data, self.numerical_measurements_must_be_within_tolerance\n )\n\n # Read in measurements and add them to the database\n self.n_batches_done += 1\n to_insert = data.copy()\n to_insert[\"BatchNr\"] = self.n_batches_done\n to_insert[\"FitNr\"] = np.nan\n\n self.measurements_exp = pd.concat(\n [self.measurements_exp, to_insert], axis=0, ignore_index=True\n )\n\n # Telemetry\n telemetry_record_value(TELEM_LABELS[\"COUNT_ADD_RESULTS\"], 1)\n telemetry_record_recommended_measurement_percentage(\n self._cached_recommendation,\n data,\n self.parameters,\n self.numerical_measurements_must_be_within_tolerance,\n )\n\n def recommend(self, batch_quantity: int = 5) -> pd.DataFrame:\n \"\"\"Provide the recommendations for the next batch of experiments.\n\n Args:\n batch_quantity: Number of requested recommendations.\n\n Returns:\n Dataframe containing the recommendations in experimental representation.\n\n Raises:\n ValueError: If ``batch_quantity`` is smaller than 1.\n \"\"\"\n if batch_quantity < 1:\n raise ValueError(\n f\"You must at least request one recommendation per batch, but provided \"\n f\"{batch_quantity=}.\"\n )\n\n # If there are cached recommendations and the batch size of those is equal to\n # the previously requested one, we just return those\n if len(self._cached_recommendation) == batch_quantity:\n return self._cached_recommendation\n\n # Update recommendation meta data\n if len(self.measurements_exp) > 0:\n self.n_fits_done += 1\n self.measurements_exp[\"FitNr\"].fillna(self.n_fits_done, inplace=True)\n\n # Get the recommended search space entries\n rec = self.strategy.recommend(\n self.searchspace,\n batch_quantity,\n self.measurements_parameters_comp,\n self.measurements_targets_comp,\n )\n\n # Cache the recommendations\n self._cached_recommendation = rec.copy()\n\n # Telemetry\n telemetry_record_value(TELEM_LABELS[\"COUNT_RECOMMEND\"], 1)\n telemetry_record_value(TELEM_LABELS[\"BATCH_QUANTITY\"], batch_quantity)\n\n return rec" }, { "identifier": "Objective", "path": "baybe/objective.py", "snippet": "class Objective(SerialMixin):\n \"\"\"Class for managing optimization objectives.\"\"\"\n\n # TODO: The class currently directly depends on `NumericalTarget`. Once this\n # direct dependence is replaced with a dependence on `Target`, the type\n # annotations should be changed.\n\n mode: Literal[\"SINGLE\", \"DESIRABILITY\"] = field()\n \"\"\"The optimization mode.\"\"\"\n\n targets: List[Target] = field(validator=min_len(1))\n \"\"\"The list of targets used for the objective.\"\"\"\n\n weights: List[float] = field(converter=_normalize_weights)\n \"\"\"The weights used to balance the different targets. 
By default, all\n weights are equally important.\"\"\"\n\n combine_func: Literal[\"MEAN\", \"GEOM_MEAN\"] = field(\n default=\"GEOM_MEAN\", validator=in_([\"MEAN\", \"GEOM_MEAN\"])\n )\n \"\"\"The function used to combine the different targets.\"\"\"\n\n @weights.default\n def _default_weights(self) -> List[float]:\n \"\"\"Create the default weights.\"\"\"\n # By default, all targets are equally important.\n return [1.0] * len(self.targets)\n\n @targets.validator\n def _validate_targets( # noqa: DOC101, DOC103\n self, _: Any, targets: List[NumericalTarget]\n ) -> None:\n \"\"\"Validate targets depending on the objective mode.\n\n Raises:\n ValueError: If multiple targets are specified when using objective mode\n ``SINGLE``.\n \"\"\"\n # Raises a ValueError if multiple targets are specified when using objective\n # mode SINGLE.\n if (self.mode == \"SINGLE\") and (len(targets) != 1):\n raise ValueError(\n \"For objective mode 'SINGLE', exactly one target must be specified.\"\n )\n # Raises a ValueError if there are unbounded targets when using objective mode\n # DESIRABILITY.\n if self.mode == \"DESIRABILITY\":\n if any(not target.bounds.is_bounded for target in targets):\n raise ValueError(\n \"In 'DESIRABILITY' mode for multiple targets, each target must \"\n \"have bounds defined.\"\n )\n\n @weights.validator\n def _validate_weights( # noqa: DOC101, DOC103\n self, _: Any, weights: List[float]\n ) -> None:\n \"\"\"Validate target weights.\n\n Raises:\n ValueError: If the number of weights and the number of targets differ.\n \"\"\"\n if weights is None:\n return\n\n # Assert that weights is a list of numbers\n validator = deep_iterable(instance_of(float), instance_of(list))\n validator(self, _, weights)\n\n if len(weights) != len(self.targets):\n raise ValueError(\n f\"Weights list for your objective has {len(weights)} values, but you \"\n f\"defined {len(self.targets)} targets.\"\n )\n\n def transform(self, data: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Transform targets from experimental to computational representation.\n\n Args:\n data: The data to be transformed. Must contain all target values, can\n contain more columns.\n\n Returns:\n A new dataframe with the targets in computational representation. Columns\n will be as in the input (except when objective mode is ``DESIRABILITY``).\n\n Raises:\n ValueError: If the specified averaging function is unknown.\n \"\"\"\n # Perform transformations that are required independent of the mode\n transformed = data[[t.name for t in self.targets]].copy()\n for target in self.targets:\n transformed[target.name] = target.transform(data[target.name])\n\n # In desirability mode, the targets are additionally combined further into one\n if self.mode == \"DESIRABILITY\":\n if self.combine_func == \"GEOM_MEAN\":\n func = geom_mean\n elif self.combine_func == \"MEAN\":\n func = partial(np.average, axis=1)\n else:\n raise ValueError(\n f\"The specified averaging function {self.combine_func} is unknown.\"\n )\n\n vals = func(transformed.values, weights=self.weights)\n transformed = pd.DataFrame({\"Comp_Target\": vals}, index=transformed.index)\n\n return transformed" }, { "identifier": "CategoricalParameter", "path": "baybe/parameters/categorical.py", "snippet": "class CategoricalParameter(DiscreteParameter):\n \"\"\"Parameter class for categorical parameters.\"\"\"\n\n # class variables\n is_numeric: ClassVar[bool] = False\n # See base class.\n\n # object variables\n _values: Tuple[str, ...] 
= field(\n converter=tuple,\n validator=(\n min_len(2),\n validate_unique_values,\n deep_iterable(member_validator=(instance_of(str), min_len(1))),\n ),\n )\n # See base class.\n\n encoding: CategoricalEncoding = field(\n default=CategoricalEncoding.OHE, converter=CategoricalEncoding\n )\n # See base class.\n\n @property\n def values(self) -> tuple:\n \"\"\"The values of the parameter.\"\"\"\n return self._values\n\n @cached_property\n def comp_df(self) -> pd.DataFrame: # noqa: D102\n # See base class.\n if self.encoding is CategoricalEncoding.OHE:\n cols = [f\"{self.name}_{val}\" for val in self.values]\n comp_df = pd.DataFrame(np.eye(len(self.values), dtype=int), columns=cols)\n elif self.encoding is CategoricalEncoding.INT:\n comp_df = pd.DataFrame(range(len(self.values)), columns=[self.name])\n comp_df.index = pd.Index(self.values)\n\n return comp_df" }, { "identifier": "NumericalDiscreteParameter", "path": "baybe/parameters/numerical.py", "snippet": "class NumericalDiscreteParameter(DiscreteParameter):\n \"\"\"Parameter class for discrete numerical parameters (a.k.a. setpoints).\"\"\"\n\n # class variables\n is_numeric: ClassVar[bool] = True\n # See base class.\n\n # object variables\n # NOTE: The parameter values are assumed to be sorted by the tolerance validator.\n _values: Tuple[float, ...] = field(\n # FIXME[typing]: https://github.com/python-attrs/cattrs/issues/111\n converter=lambda x: sorted(cattrs.structure(x, Tuple[float, ...])), # type: ignore\n # FIXME[typing]: https://github.com/python-attrs/attrs/issues/1197\n validator=[\n min_len(2),\n validate_unique_values, # type: ignore\n validate_is_finite,\n ],\n )\n \"\"\"The values the parameter can take.\"\"\"\n\n tolerance: float = field(default=0.0)\n \"\"\"The absolute tolerance used for deciding whether a value is in range. A tolerance\n larger than half the minimum distance between parameter values is not allowed\n because that could cause ambiguity when inputting data points later.\"\"\"\n\n @tolerance.validator\n def _validate_tolerance( # noqa: DOC101, DOC103\n self, _: Any, tolerance: float\n ) -> None:\n \"\"\"Validate that the given tolerance is safe.\n\n The tolerance is the allowed experimental uncertainty when\n reading in measured values. 
A tolerance larger than half the minimum\n distance between parameter values is not allowed because that could cause\n ambiguity when inputting data points later.\n\n Raises:\n ValueError: If the tolerance is not safe.\n \"\"\"\n # For zero tolerance, the only left requirement is that all parameter values\n # are distinct, which is already ensured by the corresponding validator.\n if tolerance == 0.0:\n return\n\n min_dist = np.diff(self.values).min()\n if min_dist == (eps := np.nextafter(0, 1, dtype=DTypeFloatNumpy)):\n raise NumericalUnderflowError(\n f\"The distance between any two parameter values must be at least \"\n f\"twice the size of the used floating point resolution of {eps}.\"\n )\n\n if tolerance >= (max_tol := min_dist / 2.0):\n raise ValueError(\n f\"Parameter '{self.name}' is initialized with tolerance {tolerance} \"\n f\"but due to the given parameter values {self.values}, the specified \"\n f\"tolerance must be smaller than {max_tol} to avoid ambiguity.\"\n )\n\n @property\n def values(self) -> tuple: # noqa: D102\n # See base class.\n return self._values\n\n @cached_property\n def comp_df(self) -> pd.DataFrame: # noqa: D102\n # See base class.\n comp_df = pd.DataFrame({self.name: self.values}, index=self.values)\n return comp_df\n\n def is_in_range(self, item: float) -> bool: # noqa: D102\n # See base class.\n differences_acceptable = [\n np.abs(val - item) <= self.tolerance for val in self.values\n ]\n return any(differences_acceptable)" }, { "identifier": "SubstanceParameter", "path": "baybe/parameters/substance.py", "snippet": "class SubstanceParameter(DiscreteParameter):\n \"\"\"Generic substances that are treated with cheminformatics descriptors.\n\n Only a decorrelated subset of descriptors should be used as otherwise this can\n result in a large number of features. For a handful of molecules, keeping only\n descriptors that have a maximum correlation of 0.7 reduces the number of\n descriptors to about 5-20. 
The number might be substantially higher with more\n labels given.\n \"\"\"\n\n # class variables\n is_numeric: ClassVar[bool] = False\n # See base class.\n\n # object variables\n data: Dict[str, Smiles] = field(\n validator=deep_mapping(\n mapping_validator=min_len(2),\n # FIXME[typing]: https://github.com/python-attrs/attrs/issues/1206\n key_validator=and_(instance_of(str), min_len(1)),\n value_validator=lambda *x: None,\n )\n )\n \"\"\"A mapping that provides the SMILES strings for all available parameter values.\"\"\"\n\n decorrelate: Union[bool, float] = field(\n default=True, validator=validate_decorrelation\n )\n \"\"\"Specifies the used decorrelation mode for the parameter encoding.\n\n - ``False``: The encoding is used as is.\n - ``True``: The encoding is decorrelated using a default correlation threshold.\n - float in (0, 1): The encoding is decorrelated using the specified threshold.\n \"\"\"\n\n encoding: SubstanceEncoding = field(\n default=SubstanceEncoding.MORDRED, converter=SubstanceEncoding\n )\n # See base class.\n\n @encoding.validator\n def _validate_encoding(self, _: Any, value: str) -> None: # noqa: DOC101, DOC103\n \"\"\"Validate that the chosen encoding can be used.\n\n This validation is necessary since certain encodings are only usable when\n additional dependencies, in particular the ``chem`` dependency, have been\n installed.\n\n Raises:\n ImportError: If the ``chem``dependency was not installed but an encoding\n requiring this dependency is requested.\n \"\"\"\n if value is SubstanceEncoding.MORDRED and not (\n _MORDRED_INSTALLED and _RDKIT_INSTALLED\n ):\n raise ImportError(\n \"The mordred/rdkit packages are not installed, a SubstanceParameter \"\n \"with MORDRED encoding cannot be used. Consider installing baybe with \"\n \"'chem' dependency like 'pip install baybe[chem]'\"\n )\n if (\n value in [SubstanceEncoding.RDKIT, SubstanceEncoding.MORGAN_FP]\n and not _RDKIT_INSTALLED\n ):\n raise ImportError(\n \"The rdkit package is not installed, a SubstanceParameter with \"\n \"RDKIT or MORGAN_FP encoding cannot be used. 
Consider installing baybe \"\n \"with 'chem' dependency like 'pip install baybe[chem]'\"\n )\n\n @data.validator\n def _validate_substance_data( # noqa: DOC101, DOC103\n self, _: Any, data: Dict[str, Smiles]\n ) -> None:\n \"\"\"Validate that the substance data, provided as SMILES, is valid.\n\n Raises:\n ValueError: If one or more of the SMILES are invalid.\n ValueError: If the several entries represent the same substance.\n \"\"\"\n # Check for invalid SMILES\n canonical_smiles = {}\n exceptions = []\n for name, smiles in data.items():\n try:\n canonical_smiles[name] = get_canonical_smiles(smiles)\n except ValueError:\n exceptions.append(\n ValueError(\n f\"The SMILES '{smiles}' for molecule '{name}' does \"\n f\"not appear to be valid.\"\n )\n )\n if exceptions:\n raise ExceptionGroup(\"invalid SMILES\", exceptions)\n\n # Check for duplicate substances\n if groups := group_duplicate_values(canonical_smiles):\n exceptions = []\n for group, substances in groups.items():\n group_data = {s: data[s] for s in substances}\n exceptions.append(\n ValueError(\n f\"The following entries all represent the same substance \"\n f\"'{group}': {group_data}.\"\n )\n )\n raise ExceptionGroup(\"duplicate substances\", exceptions)\n\n @property\n def values(self) -> tuple:\n \"\"\"Returns the labels of the given set of molecules.\"\"\"\n # Since the order of dictionary keys is important here, this will only work\n # for Python 3.7 or higher\n return tuple(self.data.keys())\n\n @cached_property\n def comp_df(self) -> pd.DataFrame: # noqa: D102\n # See base class.\n vals = list(self.data.values())\n pref = self.name + \"_\"\n\n # Get the raw descriptors\n if self.encoding is SubstanceEncoding.MORDRED:\n comp_df = smiles_to_mordred_features(vals, prefix=pref)\n elif self.encoding is SubstanceEncoding.RDKIT:\n comp_df = smiles_to_rdkit_features(vals, prefix=pref)\n elif self.encoding is SubstanceEncoding.MORGAN_FP:\n comp_df = smiles_to_fp_features(vals, prefix=pref)\n else:\n raise ValueError(\n f\"Unknown parameter encoding {self.encoding} for parameter {self.name}.\"\n )\n\n # Drop NaN and constant columns\n comp_df = comp_df.loc[:, ~comp_df.isna().any(axis=0)]\n comp_df = df_drop_single_value_columns(comp_df)\n\n # If there are bool columns, convert them to int (possible for Mordred)\n comp_df.loc[:, comp_df.dtypes == bool] = comp_df.loc[\n :, comp_df.dtypes == bool\n ].astype(int)\n\n # Label the rows with the molecule names\n comp_df.index = pd.Index(self.values)\n\n # Get a decorrelated subset of the descriptors\n if self.decorrelate:\n if isinstance(self.decorrelate, bool):\n comp_df = df_uncorrelated_features(comp_df)\n else:\n comp_df = df_uncorrelated_features(comp_df, threshold=self.decorrelate)\n\n return comp_df" }, { "identifier": "SequentialGreedyRecommender", "path": "baybe/recommenders/bayesian.py", "snippet": "class SequentialGreedyRecommender(BayesianRecommender):\n \"\"\"Recommender using sequential Greedy optimization.\n\n This recommender implements the BoTorch functions ``optimize_acqf_discrete``,\n ``optimize_acqf`` and ``optimize_acqf_mixed`` for the optimization of discrete,\n continuous and hybrid search spaces. In particular, it can be applied in all\n kinds of search spaces.\n It is important to note that this algorithm performs a brute-force optimization in\n hybrid search spaces which can be computationally expensive. 
Thus, the behavior of\n the algorithm in hybrid search spaces can be controlled by two additional\n parameters.\n \"\"\"\n\n # Class variables\n compatibility: ClassVar[SearchSpaceType] = SearchSpaceType.HYBRID\n # See base class.\n\n # Object variables\n hybrid_sampler: str = field(\n validator=validators.in_([\"None\", \"Farthest\", \"Random\"]), default=\"None\"\n )\n \"\"\"Strategy used for sampling the discrete subspace when performing hybrid search\n space optimization.\"\"\"\n\n sampling_percentage: float = field(default=1.0)\n \"\"\"Percentage of discrete search space that is sampled when performing hybrid search\n space optimization. Ignored when ``hybrid_sampler=\"None\"``.\"\"\"\n\n @sampling_percentage.validator\n def _validate_percentage( # noqa: DOC101, DOC103\n self, _: Any, value: float\n ) -> None:\n \"\"\"Validate that the given value is in fact a percentage.\n\n Raises:\n ValueError: If ``value`` is not between 0 and 1.\n \"\"\"\n if not 0 <= value <= 1:\n raise ValueError(\n f\"Hybrid sampling percentage needs to be between 0 and 1 but is {value}\"\n )\n\n def _recommend_discrete(\n self,\n acquisition_function: Callable,\n searchspace: SearchSpace,\n candidates_comp: pd.DataFrame,\n batch_quantity: int,\n ) -> pd.Index:\n # See base class.\n\n # determine the next set of points to be tested\n candidates_tensor = to_tensor(candidates_comp)\n try:\n points, _ = optimize_acqf_discrete(\n acquisition_function, batch_quantity, candidates_tensor\n )\n except AttributeError as ex:\n raise NoMCAcquisitionFunctionError(\n f\"The '{self.__class__.__name__}' only works with Monte Carlo \"\n f\"acquisition functions.\"\n ) from ex\n\n # retrieve the index of the points from the input dataframe\n # IMPROVE: The merging procedure is conceptually similar to what\n # `SearchSpace._match_measurement_with_searchspace_indices` does, though using\n # a simpler matching logic. 
When refactoring the SearchSpace class to\n # handle continuous parameters, a corresponding utility could be extracted.\n idxs = pd.Index(\n pd.merge(\n candidates_comp.reset_index(),\n pd.DataFrame(points, columns=candidates_comp.columns),\n on=list(candidates_comp),\n )[\"index\"]\n )\n assert len(points) == len(idxs)\n\n return idxs\n\n def _recommend_continuous(\n self,\n acquisition_function: Callable,\n searchspace: SearchSpace,\n batch_quantity: int,\n ) -> pd.DataFrame:\n # See base class.\n\n try:\n points, _ = optimize_acqf(\n acq_function=acquisition_function,\n bounds=searchspace.continuous.param_bounds_comp,\n q=batch_quantity,\n num_restarts=5, # TODO make choice for num_restarts\n raw_samples=10, # TODO make choice for raw_samples\n equality_constraints=[\n c.to_botorch(searchspace.continuous.parameters)\n for c in searchspace.continuous.constraints_lin_eq\n ]\n or None, # TODO: https://github.com/pytorch/botorch/issues/2042\n inequality_constraints=[\n c.to_botorch(searchspace.continuous.parameters)\n for c in searchspace.continuous.constraints_lin_ineq\n ]\n or None, # TODO: https://github.com/pytorch/botorch/issues/2042\n )\n except AttributeError as ex:\n raise NoMCAcquisitionFunctionError(\n f\"The '{self.__class__.__name__}' only works with Monte Carlo \"\n f\"acquisition functions.\"\n ) from ex\n\n # Return optimized points as dataframe\n rec = pd.DataFrame(points, columns=searchspace.continuous.param_names)\n return rec\n\n def _recommend_hybrid(\n self,\n acquisition_function: Callable,\n searchspace: SearchSpace,\n batch_quantity: int,\n ) -> pd.DataFrame:\n \"\"\"Recommend points using the ``optimize_acqf_mixed`` function of BoTorch.\n\n This functions samples points from the discrete subspace, performs optimization\n in the continuous subspace with these points being fixed and returns the best\n found solution.\n **Important**: This performs a brute-force calculation by fixing every possible\n assignment of discrete variables and optimizing the continuous subspace for\n each of them. It is thus computationally expensive.\n\n Args:\n acquisition_function: The acquisition function to be optimized.\n searchspace: The search space in which the recommendations should be made.\n batch_quantity: The size of the calculated batch.\n\n Returns:\n The recommended points.\n\n Raises:\n NoMCAcquisitionFunctionError: If a non Monte Carlo acquisition function\n is chosen.\n \"\"\"\n # Get discrete candidates.\n _, candidates_comp = searchspace.discrete.get_candidates(\n allow_repeated_recommendations=True,\n allow_recommending_already_measured=True,\n )\n\n # Calculate the number of samples from the given percentage\n n_candidates = int(self.sampling_percentage * len(candidates_comp.index))\n\n # Potential sampling of discrete candidates\n if self.hybrid_sampler == \"Farthest\":\n ilocs = farthest_point_sampling(candidates_comp.values, n_candidates)\n candidates_comp = candidates_comp.iloc[ilocs]\n elif self.hybrid_sampler == \"Random\":\n candidates_comp = candidates_comp.sample(n_candidates)\n\n # Prepare all considered discrete configurations in the List[Dict[int, float]]\n # format expected by BoTorch\n # TODO: Currently assumes that discrete parameters are first and continuous\n # second. 
Once parameter redesign [11611] is completed, we might adjust this.\n candidates_comp.columns = list(range(len(candidates_comp.columns)))\n fixed_features_list = candidates_comp.to_dict(\"records\")\n\n # Actual call of the BoTorch optimization routine\n try:\n points, _ = optimize_acqf_mixed(\n acq_function=acquisition_function,\n bounds=searchspace.param_bounds_comp,\n q=batch_quantity,\n num_restarts=5, # TODO make choice for num_restarts\n raw_samples=10, # TODO make choice for raw_samples\n fixed_features_list=fixed_features_list,\n equality_constraints=[\n c.to_botorch(\n searchspace.continuous.parameters,\n idx_offset=len(candidates_comp.columns),\n )\n for c in searchspace.continuous.constraints_lin_eq\n ]\n or None, # TODO: https://github.com/pytorch/botorch/issues/2042\n inequality_constraints=[\n c.to_botorch(\n searchspace.continuous.parameters,\n idx_offset=len(candidates_comp.columns),\n )\n for c in searchspace.continuous.constraints_lin_ineq\n ]\n or None, # TODO: https://github.com/pytorch/botorch/issues/2042\n )\n except AttributeError as ex:\n raise NoMCAcquisitionFunctionError(\n f\"The '{self.__class__.__name__}' only works with Monte Carlo \"\n f\"acquisition functions.\"\n ) from ex\n\n # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n # TODO [14819]: The following code is necessary due to floating point\n # inaccuracies introduced by BoTorch (potentially due to some float32\n # conversion?). The current workaround is the match the recommendations back\n # to the closest candidate points.\n\n # Split discrete and continuous parts\n disc_points = points[:, : len(candidates_comp.columns)]\n cont_points = points[:, len(candidates_comp.columns) :]\n\n # Find the closest match with the discrete candidates\n candidates_comp_np = candidates_comp.to_numpy()\n disc_points_np = disc_points.numpy()\n if not disc_points_np.flags[\"C_CONTIGUOUS\"]:\n disc_points_np = np.ascontiguousarray(disc_points_np)\n if not candidates_comp_np.flags[\"C_CONTIGUOUS\"]:\n candidates_comp_np = np.ascontiguousarray(candidates_comp_np)\n disc_idxs_iloc = pairwise_distances_argmin(\n disc_points_np, candidates_comp_np, metric=\"manhattan\"\n )\n\n # Get the actual search space dataframe indices\n disc_idxs_loc = candidates_comp.iloc[disc_idxs_iloc].index\n\n # Get experimental representation of discrete and continuous parts\n rec_disc_exp = searchspace.discrete.exp_rep.loc[disc_idxs_loc]\n rec_cont_exp = pd.DataFrame(\n cont_points, columns=searchspace.continuous.param_names\n )\n\n # Adjust the index of the continuous part and concatenate both\n rec_cont_exp.index = rec_disc_exp.index\n rec_exp = pd.concat([rec_disc_exp, rec_cont_exp], axis=1)\n # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n\n return rec_exp" }, { "identifier": "FPSRecommender", "path": "baybe/recommenders/sampling.py", "snippet": "class FPSRecommender(NonPredictiveRecommender):\n \"\"\"An initial strategy that selects the candidates via Farthest Point Sampling.\"\"\"\n\n # Class variables\n compatibility: ClassVar[SearchSpaceType] = SearchSpaceType.DISCRETE\n # See base class.\n\n def _recommend_discrete(\n self,\n searchspace: SearchSpace,\n candidates_comp: pd.DataFrame,\n batch_quantity: int,\n ) -> pd.Index:\n # See base class.\n\n # Fit scaler on entire search space\n # TODO [Scaling]: scaling should be handled by search space object\n scaler = StandardScaler()\n scaler.fit(searchspace.discrete.comp_rep)\n candidates_scaled = np.ascontiguousarray(scaler.transform(candidates_comp))\n ilocs = 
farthest_point_sampling(candidates_scaled, batch_quantity)\n return candidates_comp.index[ilocs]" }, { "identifier": "SearchSpace", "path": "baybe/searchspace/core.py", "snippet": "class SearchSpace(SerialMixin):\n \"\"\"Class for managing the overall search space.\n\n The search space might be purely discrete, purely continuous, or hybrid.\n Note that created objects related to the computational representations of parameters\n (e.g., parameter bounds, computational dataframes, etc.) may use a different\n parameter order than what is specified through the constructor: While the\n passed parameter list can contain parameters in arbitrary order, the\n aforementioned objects (by convention) list discrete parameters first, followed\n by continuous ones.\n \"\"\"\n\n discrete: SubspaceDiscrete = field(factory=SubspaceDiscrete.empty)\n \"\"\"The (potentially empty) discrete subspace of the overall search space.\"\"\"\n\n continuous: SubspaceContinuous = field(factory=SubspaceContinuous.empty)\n \"\"\"The (potentially empty) continuous subspace of the overall search space.\"\"\"\n\n def __attrs_post_init__(self):\n \"\"\"Perform validation and record telemetry values.\"\"\"\n validate_parameters(self.parameters)\n validate_constraints(self.constraints, self.parameters)\n\n # Telemetry\n telemetry_record_value(TELEM_LABELS[\"COUNT_SEARCHSPACE_CREATION\"], 1)\n telemetry_record_value(TELEM_LABELS[\"NUM_PARAMETERS\"], len(self.parameters))\n telemetry_record_value(\n TELEM_LABELS[\"NUM_CONSTRAINTS\"],\n len(self.constraints) if self.constraints else 0,\n )\n\n @classmethod\n def from_product(\n cls,\n parameters: List[Parameter],\n constraints: Optional[List[Constraint]] = None,\n empty_encoding: bool = False,\n ) -> SearchSpace:\n \"\"\"Create a search space from a cartesian product.\n\n In the search space, optional subsequent constraints are applied.\n That is, the discrete subspace becomes the (filtered) cartesian product\n containing all discrete parameter combinations while, analogously, the\n continuous subspace represents the (filtered) cartesian product of all\n continuous parameters.\n\n Args:\n parameters: The parameters spanning the search space.\n constraints: An optional set of constraints restricting the valid parameter\n space.\n empty_encoding: If ``True``, uses an \"empty\" encoding for all parameters.\n This is useful, for instance, in combination with random search\n strategies that do not read the actual parameter values, since it avoids\n the (potentially costly) transformation of the parameter values to their\n computational representation.\n\n Returns:\n The constructed search space.\n \"\"\"\n # IMPROVE: The arguments get pre-validated here to avoid the potentially costly\n # creation of the subspaces. 
Perhaps there is an elegant way to bypass the\n # default validation in the initializer (which is required for other\n # ways of object creation) in this particular case.\n validate_parameters(parameters)\n if constraints:\n validate_constraints(constraints, parameters)\n else:\n constraints = []\n\n discrete: SubspaceDiscrete = SubspaceDiscrete.from_product(\n parameters=[\n cast(DiscreteParameter, p) for p in parameters if p.is_discrete\n ],\n constraints=[\n cast(DiscreteConstraint, c) for c in constraints if c.is_discrete\n ],\n empty_encoding=empty_encoding,\n )\n continuous: SubspaceContinuous = SubspaceContinuous(\n parameters=[\n cast(NumericalContinuousParameter, p)\n for p in parameters\n if not p.is_discrete\n ],\n constraints_lin_eq=[\n cast(ContinuousLinearEqualityConstraint, c)\n for c in constraints\n if isinstance(c, ContinuousLinearEqualityConstraint)\n ],\n constraints_lin_ineq=[\n cast(ContinuousLinearInequalityConstraint, c)\n for c in constraints\n if isinstance(c, ContinuousLinearInequalityConstraint)\n ],\n )\n\n return SearchSpace(discrete=discrete, continuous=continuous)\n\n @property\n def parameters(self) -> List[Parameter]:\n \"\"\"Return the list of parameters of the search space.\"\"\"\n return self.discrete.parameters + self.continuous.parameters\n\n @property\n def constraints(self) -> List[Constraint]:\n \"\"\"Return the constraints of the search space.\"\"\"\n return (\n self.discrete.constraints\n + self.continuous.constraints_lin_eq\n + self.continuous.constraints_lin_ineq\n )\n\n @property\n def type(self) -> SearchSpaceType:\n \"\"\"Return the type of the search space.\"\"\"\n if self.discrete.is_empty and not self.continuous.is_empty:\n return SearchSpaceType.CONTINUOUS\n if not self.discrete.is_empty and self.continuous.is_empty:\n return SearchSpaceType.DISCRETE\n if not self.discrete.is_empty and not self.continuous.is_empty:\n return SearchSpaceType.HYBRID\n raise RuntimeError(\"This line should be impossible to reach.\")\n\n @property\n def contains_mordred(self) -> bool:\n \"\"\"Indicates if any of the discrete parameters uses ``MORDRED`` encoding.\"\"\"\n return any(\n p.encoding is SubstanceEncoding.MORDRED for p in self.discrete.parameters\n )\n\n @property\n def contains_rdkit(self) -> bool:\n \"\"\"Indicates if any of the discrete parameters uses ``RDKIT`` encoding.\"\"\"\n return any(\n p.encoding is SubstanceEncoding.RDKIT for p in self.discrete.parameters\n )\n\n @property\n def param_bounds_comp(self) -> torch.Tensor:\n \"\"\"Return bounds as tensor.\"\"\"\n return torch.hstack(\n [self.discrete.param_bounds_comp, self.continuous.param_bounds_comp]\n )\n\n @property\n def task_idx(self) -> Optional[int]:\n \"\"\"The column index of the task parameter in computational representation.\"\"\"\n try:\n # TODO [16932]: Redesign metadata handling\n task_param = next(\n p for p in self.parameters if isinstance(p, TaskParameter)\n )\n except StopIteration:\n return None\n # TODO[11611]: The current approach has two limitations:\n # 1. It matches by column name and thus assumes that the parameter name\n # is used as the column name.\n # 2. It relies on the current implementation detail that discrete parameters\n # appear first in the computational dataframe.\n # --> Fix this when refactoring the data\n return self.discrete.comp_rep.columns.get_loc(task_param.name)\n\n @property\n def n_tasks(self) -> int:\n \"\"\"The number of tasks encoded in the search space.\"\"\"\n # TODO [16932]: This approach only works for a single task parameter. 
For\n # multiple task parameters, we need to align what the output should even\n # represent (e.g. number of combinatorial task combinations, number of\n # tasks per task parameter, etc).\n try:\n task_param = next(\n p for p in self.parameters if isinstance(p, TaskParameter)\n )\n return len(task_param.values)\n\n # When there are no task parameters, we effectively have a single task\n except StopIteration:\n return 1\n\n def transform(\n self,\n data: pd.DataFrame,\n ) -> pd.DataFrame:\n \"\"\"Transform data from experimental to computational representation.\n\n This function can e.g. be used to transform data obtained from measurements.\n Continuous parameters are not transformed but included.\n\n Args:\n data: The data to be transformed. Must contain all specified parameters, can\n contain more columns.\n\n Returns:\n A dataframe with the parameters in computational representation.\n \"\"\"\n # Transform subspaces separately\n df_discrete = self.discrete.transform(data)\n df_continuous = self.continuous.transform(data)\n\n # Combine Subspaces\n comp_rep = pd.concat([df_discrete, df_continuous], axis=1)\n\n return comp_rep" }, { "identifier": "TwoPhaseStrategy", "path": "baybe/strategies/composite.py", "snippet": "class TwoPhaseStrategy(Strategy):\n \"\"\"A two-phased strategy that switches the recommender at a certain specified point.\n\n The recommender is switched when a new (batch) recommendation is requested and\n the training data set size (i.e., the total number of collected measurements\n including those gathered before the strategy was active) is equal to or greater\n than the number specified via the ``switch_after`` parameter.\n\n Note:\n Throughout each phase, the strategy reuses the **same** recommender object,\n that is, no new instances are created. Therefore, special attention is required\n when using the strategy with stateful recommenders.\n \"\"\"\n\n initial_recommender: Recommender = field(factory=RandomRecommender)\n \"\"\"The initial recommender used by the strategy.\"\"\"\n\n recommender: Recommender = field(factory=SequentialGreedyRecommender)\n \"\"\"The recommender used by the strategy after the switch.\"\"\"\n\n switch_after: int = field(default=1)\n \"\"\"The number of experiments after which the recommender is switched for the next\n requested batch.\"\"\"\n\n def select_recommender( # noqa: D102\n self,\n searchspace: SearchSpace,\n batch_quantity: int = 1,\n train_x: Optional[pd.DataFrame] = None,\n train_y: Optional[pd.DataFrame] = None,\n ) -> Recommender:\n # See base class.\n\n # FIXME: enable predictive recommenders for empty training data\n if (train_x is None or len(train_x) == 0) and not isinstance(\n self.initial_recommender, NonPredictiveRecommender\n ):\n raise _unsupported_recommender_error\n\n return (\n self.recommender\n if len(train_x) >= self.switch_after\n else self.initial_recommender\n )" }, { "identifier": "NumericalTarget", "path": "baybe/targets/numerical.py", "snippet": "class NumericalTarget(Target, SerialMixin):\n \"\"\"Class for numerical targets.\"\"\"\n\n # NOTE: The type annotations of `bounds` are correctly overridden by the attrs\n # converter. Nonetheless, PyCharm's linter might incorrectly raise a type warning\n # when calling the constructor. This is a known issue:\n # https://youtrack.jetbrains.com/issue/PY-34243\n # Quote from attrs docs:\n # If a converter’s first argument has a type annotation, that type will\n # appear in the signature for __init__. 
A converter will override an explicit\n # type annotation or type argument.\n\n mode: TargetMode = field(converter=TargetMode)\n \"\"\"The target mode.\"\"\"\n\n bounds: Interval = field(default=None, converter=convert_bounds)\n \"\"\"Optional target bounds.\"\"\"\n\n transformation: Optional[TargetTransformation] = field(\n converter=lambda x: None if x is None else TargetTransformation(x)\n )\n \"\"\"An optional target transformation.\"\"\"\n\n @transformation.default\n def _default_transformation(self) -> Optional[TargetTransformation]:\n \"\"\"Provide the default transformation for bounded targets.\"\"\"\n if self.bounds.is_bounded:\n fun = _VALID_TRANSFORMATIONS[self.mode][0]\n warnings.warn(\n f\"The transformation for target '{self.name}' \"\n f\"in '{self.mode.name}' mode has not been specified. \"\n f\"Setting the transformation to '{fun.name}'.\",\n UserWarning,\n )\n return fun\n return None\n\n @bounds.validator\n def _validate_bounds(self, _: Any, bounds: Interval) -> None: # noqa: DOC101, DOC103\n \"\"\"Validate the bounds.\n\n Raises:\n ValueError: If the target is defined on a half-bounded interval.\n ValueError: If the target is in ``MATCH`` mode but the provided bounds\n are infinite.\n \"\"\"\n # IMPROVE: We could also include half-way bounds, which however don't work\n # for the desirability approach\n if bounds.is_half_bounded:\n raise ValueError(\"Targets on half-bounded intervals are not supported.\")\n if self.mode is TargetMode.MATCH and not bounds.is_bounded:\n raise ValueError(\n f\"Target '{self.name}' is in {TargetMode.MATCH.name} mode,\"\n f\"which requires finite bounds.\"\n )\n\n @transformation.validator\n def _validate_transformation( # noqa: DOC101, DOC103\n self, _: Any, value: Optional[TargetTransformation]\n ) -> None:\n \"\"\"Validate that the given transformation is compatible with the specified mode.\n\n Raises:\n ValueError: If the target transformation and mode are not compatible.\n \"\"\"\n if (value is not None) and (value not in _VALID_TRANSFORMATIONS[self.mode]):\n raise ValueError(\n f\"You specified bounds for target '{self.name}', but your \"\n f\"specified transformation '{value}' is not compatible \"\n f\"with the target mode {self.mode}'. It must be one \"\n f\"of {_VALID_TRANSFORMATIONS[self.mode]}.\"\n )\n\n def transform(self, data: pd.DataFrame) -> pd.DataFrame: # noqa: D102\n # See base class.\n\n # When bounds are given, apply the respective transformation\n if self.bounds.is_bounded:\n func = _get_target_transformation(\n # TODO[typing]: For bounded targets (see if clause), the attrs default\n # ensures there is always a transformation specified.\n # Use function overloads to make this explicit.\n self.mode,\n cast(TargetTransformation, self.transformation),\n )\n transformed = pd.DataFrame(\n func(data, *self.bounds.to_tuple()), index=data.index\n )\n\n # If no bounds are given, simply negate all target values for ``MIN`` mode.\n # For ``MAX`` mode, nothing needs to be done.\n # For ``MATCH`` mode, the validators avoid a situation without specified bounds.\n elif self.mode is TargetMode.MIN:\n transformed = -data\n\n else:\n transformed = data.copy()\n\n return transformed" } ]
import numpy as np from baybe import Campaign from baybe.objective import Objective from baybe.parameters import ( CategoricalParameter, NumericalDiscreteParameter, SubstanceParameter, ) from baybe.recommenders import FPSRecommender, SequentialGreedyRecommender from baybe.searchspace import SearchSpace from baybe.strategies import TwoPhaseStrategy from baybe.targets import NumericalTarget
11,824
### Example for the serialization of a campaign # This example shows how to serialize and also de-serialize a campaign. # It demonstrates and shows that the "original" and "new" objects behave the same. # This example assumes some basic familiarity with using BayBE. # We thus refer to [`campaign`](./../Basics/campaign.md) for a basic example. #### Necessary imports #### Experiment setup parameters = [ CategoricalParameter( name="Granularity", values=["coarse", "medium", "fine"], encoding="OHE", ), NumericalDiscreteParameter( name="Pressure[bar]", values=[1, 5, 10], tolerance=0.2, ), NumericalDiscreteParameter( name="Temperature[degree_C]", values=np.linspace(100, 200, 10), ), SubstanceParameter( name="Solvent", data={ "Solvent A": "COC", "Solvent B": "CCC", "Solvent C": "O", "Solvent D": "CS(=O)C", }, encoding="MORDRED", ), ] #### Creating the campaign campaign = Campaign( searchspace=SearchSpace.from_product(parameters=parameters, constraints=None), objective=Objective( mode="SINGLE", targets=[NumericalTarget(name="Yield", mode="MAX")] ), strategy=TwoPhaseStrategy(
### Example for the serialization of a campaign # This example shows how to serialize and also de-serialize a campaign. # It demonstrates and shows that the "original" and "new" objects behave the same. # This example assumes some basic familiarity with using BayBE. # We thus refer to [`campaign`](./../Basics/campaign.md) for a basic example. #### Necessary imports #### Experiment setup parameters = [ CategoricalParameter( name="Granularity", values=["coarse", "medium", "fine"], encoding="OHE", ), NumericalDiscreteParameter( name="Pressure[bar]", values=[1, 5, 10], tolerance=0.2, ), NumericalDiscreteParameter( name="Temperature[degree_C]", values=np.linspace(100, 200, 10), ), SubstanceParameter( name="Solvent", data={ "Solvent A": "COC", "Solvent B": "CCC", "Solvent C": "O", "Solvent D": "CS(=O)C", }, encoding="MORDRED", ), ] #### Creating the campaign campaign = Campaign( searchspace=SearchSpace.from_product(parameters=parameters, constraints=None), objective=Objective( mode="SINGLE", targets=[NumericalTarget(name="Yield", mode="MAX")] ), strategy=TwoPhaseStrategy(
recommender=SequentialGreedyRecommender(),
5
2023-11-27 17:02:40+00:00
16k
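The per-record context is a list of cross-file definitions (identifier, path, snippet), and the small integer stored after the gold next line is consistent with a zero-based index into that list: for the record above it is 5, and the sixth context entry is SequentialGreedyRecommender, exactly the symbol the gold line instantiates. A minimal sketch, under that assumed schema, of retrieving the referenced snippet from an already-parsed record:

from typing import Any, Dict, List


def gold_context_snippet(row: Dict[str, Any]) -> str:
    # Assumed schema: row["context"] is a list of dicts with "identifier", "path"
    # and "snippet" keys, and row["gold_snippet_index"] indexes into that list.
    context: List[Dict[str, str]] = row["context"]
    idx: int = row["gold_snippet_index"]
    if not 0 <= idx < len(context):
        raise IndexError(f"gold index {idx} out of range for {len(context)} context entries")
    entry = context[idx]
    return f"# {entry['identifier']} ({entry['path']})\n{entry['snippet']}"


# Hypothetical usage once a record has been parsed into `row`:
#   print(gold_context_snippet(row).splitlines()[0])

Selecting the indexed definition this way is presumably what allows a retrieval-style setup to prepend only the relevant cross-file snippet to the prompt rather than the entire context list.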
UX-Decoder/LLaVA-Grounding
llava/model/language_model/llava_llama_gd.py
[ { "identifier": "LlavaMetaModel", "path": "llava/model/llava_arch.py", "snippet": "class LlavaMetaModel:\n\n def __init__(self, config):\n super(LlavaMetaModel, self).__init__(config)\n\n if hasattr(config, \"mm_vision_tower\"):\n self.vision_tower = build_vision_tower(config, delay_load=True)\n self.mm_projector = nn.Linear(config.mm_hidden_size, config.hidden_size)\n\n def get_vision_tower(self):\n vision_tower = getattr(self, 'vision_tower', None)\n if type(vision_tower) is list:\n vision_tower = vision_tower[0]\n return vision_tower\n\n def initialize_vision_modules(self, model_args, fsdp=None):\n vision_tower = model_args.vision_tower\n mm_vision_select_layer = model_args.mm_vision_select_layer\n mm_vision_select_feature = model_args.mm_vision_select_feature\n pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter\n\n self.config.mm_vision_tower = vision_tower\n\n vision_tower = build_vision_tower(model_args)\n\n if fsdp is not None and len(fsdp) > 0:\n self.vision_tower = [vision_tower]\n else:\n self.vision_tower = vision_tower\n\n self.config.use_mm_proj = True\n self.config.mm_hidden_size = vision_tower.hidden_size\n self.config.mm_vision_select_layer = mm_vision_select_layer\n self.config.mm_vision_select_feature = mm_vision_select_feature\n\n if not hasattr(self, 'mm_projector'):\n self.mm_projector = nn.Linear(self.config.mm_hidden_size, self.config.hidden_size)\n\n if pretrain_mm_mlp_adapter is not None:\n mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu')\n def get_w(weights, keyword):\n return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k}\n\n # self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector'))\n self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector'))" }, { "identifier": "LlavaMetaForCausalLM", "path": "llava/model/llava_arch.py", "snippet": "class LlavaMetaForCausalLM(ABC):\n\n @abstractmethod\n def get_model(self):\n pass\n\n def get_vision_tower(self):\n return self.get_model().get_vision_tower()\n\n def encode_images(self, images):\n image_features = self.get_model().get_vision_tower()(images)\n image_features = self.get_model().mm_projector(image_features)\n return image_features\n\n def prepare_inputs_labels_for_multimodal(\n self, input_ids, attention_mask, past_key_values, labels, images\n ):\n vision_tower = self.get_vision_tower()\n if vision_tower is None or images is None or input_ids.shape[1] == 1:\n if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1:\n attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)\n return input_ids, attention_mask, past_key_values, None, labels\n\n if type(images) is list or images.ndim == 5:\n concat_images = torch.cat([image for image in images], dim=0)\n image_features = self.encode_images(concat_images)\n split_sizes = [image.shape[0] for image in images]\n image_features = torch.split(image_features, split_sizes, dim=0)\n image_features = [x.flatten(0, 1) for x in image_features]\n else:\n image_features = self.encode_images(images)\n\n new_input_embeds = []\n new_labels = [] if labels is not None else None\n cur_image_idx = 0\n orig_embeds_params = getattr(self, 'orig_embeds_params', None)\n if orig_embeds_params is not None:\n orig_embeds_params_in = orig_embeds_params[0]\n orig_embeds_params_out = orig_embeds_params[1]\n # 
st_inp=self.tokenizer.encode(grounding_start)[1]\n # st_out=self.tokenizer.encode(grounding_start)[1]\n with torch.no_grad():\n self.get_input_embeddings().weight[:-3] = orig_embeds_params_in[:-3].data\n # if self.tokenizer.decode([len(self.tokenizer)-1])=='<seg>':\n self.get_output_embeddings().weight[:-3] = orig_embeds_params_out[:-3].data\n for batch_idx, cur_input_ids in enumerate(input_ids):\n if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:\n # multimodal LLM, but the current sample is not multimodal\n cur_input_embeds = self.get_model().embed_tokens(cur_input_ids)\n cur_input_embeds = cur_input_embeds + (0. * self.get_model().mm_projector(vision_tower.dummy_feature)).sum()\n new_input_embeds.append(cur_input_embeds)\n if labels is not None:\n new_labels.append(labels[batch_idx])\n cur_image_idx += 1\n continue\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n cur_new_input_embeds = []\n if labels is not None:\n cur_labels = labels[batch_idx]\n cur_new_labels = []\n assert cur_labels.shape == cur_input_ids.shape\n while image_token_indices.numel() > 0:\n cur_image_features = image_features[cur_image_idx]\n image_token_start = image_token_indices[0]\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]).detach())\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_new_labels.append(cur_labels[image_token_start:image_token_start+1])\n cur_labels = cur_labels[image_token_start+2:]\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_labels = cur_labels[image_token_start+1:]\n cur_image_idx += 1\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_input_ids = cur_input_ids[image_token_start+2:]\n else:\n cur_input_ids = cur_input_ids[image_token_start+1:]\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n if cur_input_ids.numel() > 0:\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids).detach())\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))\n if labels is not None:\n cur_new_labels.append(cur_labels)\n cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]\n cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)\n new_input_embeds.append(cur_new_input_embeds)\n if labels is not None:\n cur_new_labels = torch.cat(cur_new_labels, dim=0)\n new_labels.append(cur_new_labels)\n\n if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):\n max_len = max(x.shape[0] for x 
in new_input_embeds)\n\n new_input_embeds_align = []\n for cur_new_embed in new_input_embeds:\n cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)\n new_input_embeds_align.append(cur_new_embed)\n new_input_embeds = torch.stack(new_input_embeds_align, dim=0)\n\n if labels is not None:\n new_labels_align = []\n _new_labels = new_labels\n for cur_new_label in new_labels:\n cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)\n new_labels_align.append(cur_new_label)\n new_labels = torch.stack(new_labels_align, dim=0)\n\n if attention_mask is not None:\n new_attention_mask = []\n for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):\n new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)\n new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)\n cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)\n new_attention_mask.append(cur_new_attention_mask)\n attention_mask = torch.stack(new_attention_mask, dim=0)\n assert attention_mask.shape == new_labels.shape\n else:\n new_input_embeds = torch.stack(new_input_embeds, dim=0)\n if labels is not None:\n new_labels = torch.stack(new_labels, dim=0)\n\n if attention_mask is not None:\n new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)\n attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)\n assert attention_mask.shape == new_input_embeds.shape[:2]\n\n return None, attention_mask, past_key_values, new_input_embeds, new_labels\n\n def initialize_vision_tokenizer(self, model_args, tokenizer):\n if model_args.mm_use_im_patch_token:\n tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)\n self.resize_token_embeddings(len(tokenizer))\n\n if model_args.mm_use_im_start_end:\n num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, grounding_start, grounding_end, SEG_TOKEN], special_tokens=True)\n self.resize_token_embeddings(len(tokenizer))\n\n if num_new_tokens > 0:\n input_embeddings = self.get_input_embeddings().weight.data\n output_embeddings = self.get_output_embeddings().weight.data\n\n input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n\n input_embeddings[-num_new_tokens:] = input_embeddings_avg\n output_embeddings[-num_new_tokens:] = output_embeddings_avg\n\n if model_args.tune_mm_mlp_adapter:\n self.orig_embeds_params = [self.get_input_embeddings().weight.data.clone().cuda(),\n self.get_output_embeddings().weight.data.clone().cuda()]\n\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = True\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = True\n\n if model_args.pretrain_mm_mlp_adapter:\n mm_projector_weights = torch.load(model_args.pretrain_mm_mlp_adapter, map_location='cpu')\n embed_tokens_weight = 
mm_projector_weights['model.embed_tokens.weight']\n assert num_new_tokens == 2\n if input_embeddings.shape == embed_tokens_weight.shape:\n input_embeddings[-num_new_tokens:] = embed_tokens_weight[-num_new_tokens:]\n elif embed_tokens_weight.shape[0] == num_new_tokens:\n input_embeddings[-num_new_tokens:] = embed_tokens_weight\n else:\n raise ValueError(f\"Unexpected embed_tokens_weight shape. Pretrained: {embed_tokens_weight.shape}. Current: {input_embeddings.shape}. Numer of new tokens: {num_new_tokens}.\")\n elif model_args.mm_use_im_patch_token:\n if model_args.tune_mm_mlp_adapter:\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = False\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = False\n else:\n # import pdb; pdb.set_trace()\n num_new_tokens = tokenizer.add_tokens([grounding_start, grounding_end, SEG_TOKEN], special_tokens=True)\n inits=['[',']','.']\n nums=[tokenizer.encode(init)[1] for init in inits]\n # inp_embs = self.get_input_embeddings().weight.data[nums]\n # out_embs = self.get_output_embeddings().weight.data[nums]\n self.resize_token_embeddings(len(tokenizer))\n\n if num_new_tokens > 0:\n # print(\"Emb length:\", len(self.get_input_embeddings().weight.data))\n # if len(self.get_input_embeddings().weight.data) > 0:\n # if len(self.get_input_embeddings().weight.data) > 0:\n # self.get_input_embeddings().weight.data[-num_new_tokens:] = inp_embs\n # self.get_output_embeddings().weight.data[-num_new_tokens:] = out_embs\n input_embeddings = self.get_input_embeddings().weight.data\n output_embeddings = self.get_output_embeddings().weight.data\n #\n input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n #\n input_embeddings[-num_new_tokens:] = input_embeddings_avg\n output_embeddings[-num_new_tokens:] = output_embeddings_avg\n\n if model_args.tune_mm_mlp_adapter:\n self.orig_embeds_params = [self.get_input_embeddings().weight.data.clone().cuda(),\n self.get_output_embeddings().weight.data.clone().cuda()]\n\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = True\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = True" }, { "identifier": "LlavaMetaForCausalLM_gd", "path": "llava/model/llava_arch.py", "snippet": "class LlavaMetaForCausalLM_gd(ABC):\n\n @abstractmethod\n def get_model(self):\n pass\n\n def get_vision_tower(self):\n return self.get_model().get_vision_tower()\n\n def encode_images(self, images):\n image_features = self.get_model().get_vision_tower()(images)\n image_features = self.get_model().mm_projector(image_features.to(self.get_model().mm_projector.state_dict()[\"weight\"].dtype))\n return image_features\n\n def prepare_inputs_labels_for_multimodal(\n self, input_ids, attention_mask, past_key_values, labels, images\n ):\n vision_tower = self.get_vision_tower()\n if vision_tower is None or images is None or input_ids.shape[1] == 1:\n if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1:\n attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)\n return input_ids, attention_mask, past_key_values, None, labels\n\n if type(images) is list or images.ndim == 5:\n concat_images = torch.cat([image for image in images], dim=0)\n image_features = self.encode_images(concat_images)\n split_sizes = [image.shape[0] 
for image in images]\n image_features = torch.split(image_features, split_sizes, dim=0)\n image_features = [x.flatten(0, 1) for x in image_features]\n else:\n image_features = self.encode_images(images)\n\n new_input_embeds = []\n new_labels = [] if labels is not None else None\n cur_image_idx = 0\n orig_embeds_params = getattr(self, 'orig_embeds_params', None)\n if orig_embeds_params is not None:\n orig_embeds_params_in = orig_embeds_params[0]\n orig_embeds_params_out = orig_embeds_params[1]\n # st_inp=self.tokenizer.encode(grounding_start)[1]\n # st_out=self.tokenizer.encode(grounding_start)[1]\n with torch.no_grad():\n self.get_input_embeddings().weight[:-3] = orig_embeds_params_in[:-3].data\n # if self.tokenizer.decode([len(self.tokenizer)-1])=='<seg>':\n self.get_output_embeddings().weight[:-3] = orig_embeds_params_out[:-3].data\n\n for batch_idx, cur_input_ids in enumerate(input_ids):\n if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:\n # multimodal LLM, but the current sample is not multimodal\n cur_input_embeds = self.get_model().embed_tokens(cur_input_ids)\n cur_input_embeds = cur_input_embeds + (0. * self.get_model().mm_projector(vision_tower.dummy_feature)).sum()\n new_input_embeds.append(cur_input_embeds)\n if labels is not None:\n new_labels.append(labels[batch_idx])\n cur_image_idx += 1\n continue\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n cur_new_input_embeds = []\n if labels is not None:\n cur_labels = labels[batch_idx]\n cur_new_labels = []\n assert cur_labels.shape == cur_input_ids.shape\n while image_token_indices.numel() > 0:\n cur_image_features = image_features[cur_image_idx]\n image_token_start = image_token_indices[0]\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]))\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_new_labels.append(cur_labels[image_token_start:image_token_start+1])\n cur_labels = cur_labels[image_token_start+2:]\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_labels = cur_labels[image_token_start+1:]\n cur_image_idx += 1\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_input_ids = cur_input_ids[image_token_start+2:]\n else:\n cur_input_ids = cur_input_ids[image_token_start+1:]\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n if cur_input_ids.numel() > 0:\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))\n else:\n 
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))\n if labels is not None:\n cur_new_labels.append(cur_labels)\n cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]\n cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)\n new_input_embeds.append(cur_new_input_embeds)\n if labels is not None:\n cur_new_labels = torch.cat(cur_new_labels, dim=0)\n new_labels.append(cur_new_labels)\n\n if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):\n max_len = max(x.shape[0] for x in new_input_embeds)\n\n new_input_embeds_align = []\n for cur_new_embed in new_input_embeds:\n cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)\n new_input_embeds_align.append(cur_new_embed)\n new_input_embeds = torch.stack(new_input_embeds_align, dim=0)\n\n if labels is not None:\n new_labels_align = []\n _new_labels = new_labels\n for cur_new_label in new_labels:\n cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)\n new_labels_align.append(cur_new_label)\n new_labels = torch.stack(new_labels_align, dim=0)\n\n if attention_mask is not None:\n new_attention_mask = []\n for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):\n new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)\n new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)\n cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)\n new_attention_mask.append(cur_new_attention_mask)\n attention_mask = torch.stack(new_attention_mask, dim=0)\n assert attention_mask.shape == new_labels.shape\n else:\n new_input_embeds = torch.stack(new_input_embeds, dim=0)\n if labels is not None:\n new_labels = torch.stack(new_labels, dim=0)\n\n if attention_mask is not None:\n new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)\n attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)\n assert attention_mask.shape == new_input_embeds.shape[:2]\n\n return None, attention_mask, past_key_values, new_input_embeds, new_labels\n\n def initialize_vision_tokenizer(self, model_args, tokenizer):\n if model_args.mm_use_im_patch_token:\n tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)\n self.resize_token_embeddings(len(tokenizer))\n\n if model_args.mm_use_im_start_end:\n num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, grounding_start, grounding_end, SEG_TOKEN], special_tokens=True)\n self.resize_token_embeddings(len(tokenizer))\n\n if num_new_tokens > 0:\n input_embeddings = self.get_input_embeddings().weight.data\n output_embeddings = self.get_output_embeddings().weight.data\n\n input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n\n input_embeddings[-num_new_tokens:] = input_embeddings_avg\n output_embeddings[-num_new_tokens:] = 
output_embeddings_avg\n\n if model_args.tune_mm_mlp_adapter:\n self.orig_embeds_params = [self.get_input_embeddings().weight.data.clone().cuda(),\n self.get_output_embeddings().weight.data.clone().cuda()]\n\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = True\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = True\n\n if model_args.pretrain_mm_mlp_adapter:\n mm_projector_weights = torch.load(model_args.pretrain_mm_mlp_adapter, map_location='cpu')\n embed_tokens_weight = mm_projector_weights['model.embed_tokens.weight']\n assert num_new_tokens == 2\n if input_embeddings.shape == embed_tokens_weight.shape:\n input_embeddings[-num_new_tokens:] = embed_tokens_weight[-num_new_tokens:]\n elif embed_tokens_weight.shape[0] == num_new_tokens:\n input_embeddings[-num_new_tokens:] = embed_tokens_weight\n else:\n raise ValueError(f\"Unexpected embed_tokens_weight shape. Pretrained: {embed_tokens_weight.shape}. Current: {input_embeddings.shape}. Numer of new tokens: {num_new_tokens}.\")\n elif model_args.mm_use_im_patch_token:\n if model_args.tune_mm_mlp_adapter:\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = False\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = False\n else:\n # import pdb; pdb.set_trace()\n num_new_tokens = tokenizer.add_tokens([grounding_start, grounding_end, SEG_TOKEN], special_tokens=True)\n inits=['[',']','.']\n nums=[tokenizer.encode(init)[1] for init in inits]\n\n self.resize_token_embeddings(len(tokenizer))\n\n if num_new_tokens > 0:\n input_embeddings = self.get_input_embeddings().weight.data\n output_embeddings = self.get_output_embeddings().weight.data\n #\n input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n #\n input_embeddings[-num_new_tokens:] = input_embeddings_avg\n output_embeddings[-num_new_tokens:] = output_embeddings_avg\n\n if model_args.tune_mm_mlp_adapter:\n self.orig_embeds_params = [self.get_input_embeddings().weight.data.clone().cuda(),\n self.get_output_embeddings().weight.data.clone().cuda()]\n\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = True\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = True\n\n def initialize_seg_modules(self, cfg):\n seg_model = BaseModel(cfg, build_model(cfg))\n seg_model = seg_model.from_pretrained(cfg.MODEL.WEIGHTS)\n self.seg_model = seg_model\n\n def freeze_seg_modules(self):\n for p in self.seg_model.parameters():\n p.requires_grad = False" }, { "identifier": "LlavaMetaForCausalLM_gd_interactive", "path": "llava/model/llava_arch.py", "snippet": "class LlavaMetaForCausalLM_gd_interactive(ABC):\n\n @abstractmethod\n def get_model(self):\n pass\n\n def get_vision_tower(self):\n return self.get_model().get_vision_tower()\n\n def encode_images(self, images):\n image_features = self.get_model().get_vision_tower()(images)\n image_features = self.get_model().mm_projector(image_features.to(self.get_model().mm_projector.state_dict()[\"weight\"].dtype))\n return image_features\n\n def prepare_inputs_labels_for_multimodal(\n self, input_ids, attention_mask, past_key_values, labels, images,obj_feats=None,num_it=0\n ):\n vision_tower = self.get_vision_tower()\n if vision_tower is None or images is None or input_ids.shape[1] == 1:\n if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1:\n attention_mask = 
torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)\n return input_ids, attention_mask, past_key_values, None, labels\n\n if type(images) is list or images.ndim == 5:\n concat_images = torch.cat([image for image in images], dim=0)\n image_features = self.encode_images(concat_images)\n split_sizes = [image.shape[0] for image in images]\n image_features = torch.split(image_features, split_sizes, dim=0)\n image_features = [x.flatten(0, 1) for x in image_features]\n else:\n image_features = self.encode_images(images)\n\n new_input_embeds = []\n new_labels = [] if labels is not None else None\n cur_image_idx = 0\n orig_embeds_params = getattr(self, 'orig_embeds_params', None)\n if orig_embeds_params is not None:\n orig_embeds_params_in = orig_embeds_params[0]\n orig_embeds_params_out = orig_embeds_params[1]\n # st_inp=self.tokenizer.encode(grounding_start)[1]\n # st_out=self.tokenizer.encode(grounding_start)[1]\n with torch.no_grad():\n self.get_input_embeddings().weight[:-3] = orig_embeds_params_in[:-3].data\n # if self.tokenizer.decode([len(self.tokenizer)-1])=='<seg>':\n self.get_output_embeddings().weight[:-3] = orig_embeds_params_out[:-3].data\n\n for batch_idx, cur_input_ids in enumerate(input_ids):\n if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:\n # multimodal LLM, but the current sample is not multimodal\n cur_input_embeds = self.get_model().embed_tokens(cur_input_ids)\n cur_input_embeds = cur_input_embeds + (0. * self.get_model().mm_projector(vision_tower.dummy_feature)).sum()\n new_input_embeds.append(cur_input_embeds)\n if labels is not None:\n new_labels.append(labels[batch_idx])\n cur_image_idx += 1\n continue\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n cur_new_input_embeds = []\n if labels is not None:\n cur_labels = labels[batch_idx]\n cur_new_labels = []\n assert cur_labels.shape == cur_input_ids.shape\n while image_token_indices.numel() > 0:\n cur_image_features = image_features[cur_image_idx]\n image_token_start = image_token_indices[0]\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]))\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_new_labels.append(cur_labels[image_token_start:image_token_start+1])\n cur_labels = cur_labels[image_token_start+2:]\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_labels = cur_labels[image_token_start+1:]\n cur_image_idx += 1\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_input_ids = cur_input_ids[image_token_start+2:]\n else:\n cur_input_ids = 
cur_input_ids[image_token_start+1:]\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n if cur_input_ids.numel() > 0:\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))\n if batch_idx >= len(input_ids) - num_it:\n obj_idx = cur_input_ids == 1273\n idx_in_inter=batch_idx-(len(input_ids)-num_it)\n cur_new_input_embeds[-1][obj_idx] = obj_feats[idx_in_inter].to(cur_new_input_embeds[-1].dtype)\n if labels is not None:\n cur_labels[cur_labels==1273]=IGNORE_INDEX\n cur_new_labels.append(cur_labels)\n cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]\n cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)\n new_input_embeds.append(cur_new_input_embeds)\n if labels is not None:\n cur_new_labels = torch.cat(cur_new_labels, dim=0)\n new_labels.append(cur_new_labels)\n\n if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):\n max_len = max(x.shape[0] for x in new_input_embeds)\n\n new_input_embeds_align = []\n for cur_new_embed in new_input_embeds:\n cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)\n new_input_embeds_align.append(cur_new_embed)\n new_input_embeds = torch.stack(new_input_embeds_align, dim=0)\n\n if labels is not None:\n new_labels_align = []\n _new_labels = new_labels\n for cur_new_label in new_labels:\n cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)\n new_labels_align.append(cur_new_label)\n new_labels = torch.stack(new_labels_align, dim=0)\n\n if attention_mask is not None:\n new_attention_mask = []\n for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):\n new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)\n new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)\n cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)\n new_attention_mask.append(cur_new_attention_mask)\n attention_mask = torch.stack(new_attention_mask, dim=0)\n assert attention_mask.shape == new_labels.shape\n else:\n new_input_embeds = torch.stack(new_input_embeds, dim=0)\n if labels is not None:\n new_labels = torch.stack(new_labels, dim=0)\n\n if attention_mask is not None:\n new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)\n attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)\n assert attention_mask.shape == new_input_embeds.shape[:2]\n\n return None, attention_mask, past_key_values, new_input_embeds, new_labels\n def prepare_inputs_labels_for_multimodal_NoInter(\n self, input_ids, attention_mask, past_key_values, labels, images\n ):\n vision_tower = self.get_vision_tower()\n if vision_tower is None or images is None or input_ids.shape[1] == 1:\n if past_key_values is not None and vision_tower is not None and 
images is not None and input_ids.shape[1] == 1:\n attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)\n return input_ids, attention_mask, past_key_values, None, labels\n\n if type(images) is list or images.ndim == 5:\n concat_images = torch.cat([image for image in images], dim=0)\n image_features = self.encode_images(concat_images)\n split_sizes = [image.shape[0] for image in images]\n image_features = torch.split(image_features, split_sizes, dim=0)\n image_features = [x.flatten(0, 1) for x in image_features]\n else:\n image_features = self.encode_images(images)\n\n new_input_embeds = []\n new_labels = [] if labels is not None else None\n cur_image_idx = 0\n orig_embeds_params = getattr(self, 'orig_embeds_params', None)\n if orig_embeds_params is not None:\n orig_embeds_params_in = orig_embeds_params[0]\n orig_embeds_params_out = orig_embeds_params[1]\n # st_inp=self.tokenizer.encode(grounding_start)[1]\n # st_out=self.tokenizer.encode(grounding_start)[1]\n with torch.no_grad():\n self.get_input_embeddings().weight[:-3] = orig_embeds_params_in[:-3].data\n # if self.tokenizer.decode([len(self.tokenizer)-1])=='<seg>':\n self.get_output_embeddings().weight[:-3] = orig_embeds_params_out[:-3].data\n\n for batch_idx, cur_input_ids in enumerate(input_ids):\n if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:\n # multimodal LLM, but the current sample is not multimodal\n cur_input_embeds = self.get_model().embed_tokens(cur_input_ids)\n cur_input_embeds = cur_input_embeds + (0. * self.get_model().mm_projector(vision_tower.dummy_feature)).sum()\n new_input_embeds.append(cur_input_embeds)\n if labels is not None:\n new_labels.append(labels[batch_idx])\n cur_image_idx += 1\n continue\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n cur_new_input_embeds = []\n if labels is not None:\n cur_labels = labels[batch_idx]\n cur_new_labels = []\n assert cur_labels.shape == cur_input_ids.shape\n while image_token_indices.numel() > 0:\n cur_image_features = image_features[cur_image_idx]\n image_token_start = image_token_indices[0]\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]))\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_new_labels.append(cur_labels[image_token_start:image_token_start+1])\n cur_labels = cur_labels[image_token_start+2:]\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_labels = cur_labels[image_token_start+1:]\n cur_image_idx += 1\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_input_ids = 
cur_input_ids[image_token_start+2:]\n else:\n cur_input_ids = cur_input_ids[image_token_start+1:]\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n if cur_input_ids.numel() > 0:\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))\n if labels is not None:\n cur_new_labels.append(cur_labels)\n cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]\n cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)\n new_input_embeds.append(cur_new_input_embeds)\n if labels is not None:\n cur_new_labels = torch.cat(cur_new_labels, dim=0)\n new_labels.append(cur_new_labels)\n\n if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):\n max_len = max(x.shape[0] for x in new_input_embeds)\n\n new_input_embeds_align = []\n for cur_new_embed in new_input_embeds:\n cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)\n new_input_embeds_align.append(cur_new_embed)\n new_input_embeds = torch.stack(new_input_embeds_align, dim=0)\n\n if labels is not None:\n new_labels_align = []\n _new_labels = new_labels\n for cur_new_label in new_labels:\n cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)\n new_labels_align.append(cur_new_label)\n new_labels = torch.stack(new_labels_align, dim=0)\n\n if attention_mask is not None:\n new_attention_mask = []\n for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):\n new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)\n new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)\n cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)\n new_attention_mask.append(cur_new_attention_mask)\n attention_mask = torch.stack(new_attention_mask, dim=0)\n assert attention_mask.shape == new_labels.shape\n else:\n new_input_embeds = torch.stack(new_input_embeds, dim=0)\n if labels is not None:\n new_labels = torch.stack(new_labels, dim=0)\n\n if attention_mask is not None:\n new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)\n attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)\n assert attention_mask.shape == new_input_embeds.shape[:2]\n\n return None, attention_mask, past_key_values, new_input_embeds, new_labels\n\n def initialize_vision_tokenizer(self, model_args, tokenizer):\n if model_args.mm_use_im_patch_token:\n tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)\n self.resize_token_embeddings(len(tokenizer))\n\n if model_args.mm_use_im_start_end:\n num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, grounding_start, grounding_end, SEG_TOKEN], special_tokens=True)\n self.resize_token_embeddings(len(tokenizer))\n\n if num_new_tokens > 0:\n input_embeddings = 
self.get_input_embeddings().weight.data\n output_embeddings = self.get_output_embeddings().weight.data\n\n input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n\n input_embeddings[-num_new_tokens:] = input_embeddings_avg\n output_embeddings[-num_new_tokens:] = output_embeddings_avg\n\n if model_args.tune_mm_mlp_adapter:\n self.orig_embeds_params = [self.get_input_embeddings().weight.data.clone().cuda(),\n self.get_output_embeddings().weight.data.clone().cuda()]\n\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = True\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = True\n\n if model_args.pretrain_mm_mlp_adapter:\n mm_projector_weights = torch.load(model_args.pretrain_mm_mlp_adapter, map_location='cpu')\n embed_tokens_weight = mm_projector_weights['model.embed_tokens.weight']\n assert num_new_tokens == 2\n if input_embeddings.shape == embed_tokens_weight.shape:\n input_embeddings[-num_new_tokens:] = embed_tokens_weight[-num_new_tokens:]\n elif embed_tokens_weight.shape[0] == num_new_tokens:\n input_embeddings[-num_new_tokens:] = embed_tokens_weight\n else:\n raise ValueError(f\"Unexpected embed_tokens_weight shape. Pretrained: {embed_tokens_weight.shape}. Current: {input_embeddings.shape}. Numer of new tokens: {num_new_tokens}.\")\n elif model_args.mm_use_im_patch_token:\n if model_args.tune_mm_mlp_adapter:\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = False\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = False\n else:\n # import pdb; pdb.set_trace()\n num_new_tokens = tokenizer.add_tokens([grounding_start, grounding_end, SEG_TOKEN], special_tokens=True)\n inits=['[',']','.']\n nums=[tokenizer.encode(init)[1] for init in inits]\n # inp_embs = self.get_input_embeddings().weight.data[nums]\n # out_embs = self.get_output_embeddings().weight.data[nums]\n self.resize_token_embeddings(len(tokenizer))\n\n if num_new_tokens > 0:\n input_embeddings = self.get_input_embeddings().weight.data\n output_embeddings = self.get_output_embeddings().weight.data\n #\n input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n #\n input_embeddings[-num_new_tokens:] = input_embeddings_avg\n output_embeddings[-num_new_tokens:] = output_embeddings_avg\n\n if model_args.tune_mm_mlp_adapter:\n self.orig_embeds_params = [self.get_input_embeddings().weight.data.clone().cuda(),\n self.get_output_embeddings().weight.data.clone().cuda()]\n\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = True\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = True\n\n def initialize_seg_modules(self, cfg):\n seg_model = BaseModel(cfg, build_model(cfg))\n seg_model = seg_model.from_pretrained(cfg.MODEL.WEIGHTS)\n self.seg_model = seg_model\n\n def initialize_interactive_modules(self, cfg):\n from .semsam.BaseModel import BaseModel as SemSamBaseModel\n from .semsam import build_model as build_semsam_model\n\n seg_model = SemSamBaseModel(cfg, build_semsam_model(cfg))\n if not (cfg.MODEL.WEIGHTS == \"None\"):\n seg_model = seg_model.from_pretrained(cfg.MODEL.WEIGHTS)\n self.interactive_model = seg_model\n def freeze_seg_modules(self):\n for p in self.seg_model.parameters():\n p.requires_grad = False" } ]
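Several of the snippets in the context list above share one technique worth isolating: when new special tokens are added to the tokenizer, their input and output embedding rows are initialized to the mean of the pre-existing rows. A minimal standalone sketch of that pattern follows; the toy sizes and the token names in the comment are illustrative, not taken from the dataset row.

import torch.nn as nn

# Toy stand-in for a language model's embedding table: 10 known tokens, hidden size 4.
old_vocab_size, hidden_size = 10, 4
embeddings = nn.Embedding(old_vocab_size, hidden_size)

# Assume 3 new special tokens are appended (e.g. grounding_start, grounding_end, SEG_TOKEN).
num_new_tokens = 3
resized = nn.Embedding(old_vocab_size + num_new_tokens, hidden_size)
resized.weight.data[:old_vocab_size] = embeddings.weight.data

# The new rows start as the mean of all pre-existing rows, mirroring the snippets above.
avg = resized.weight.data[:-num_new_tokens].mean(dim=0, keepdim=True)
resized.weight.data[-num_new_tokens:] = avg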
from typing import List, Optional, Tuple, Union
from torch.nn import CrossEntropyLoss
from transformers import AutoConfig, AutoModelForCausalLM, \
    LlamaConfig, LlamaModel, LlamaForCausalLM
from transformers.modeling_outputs import CausalLMOutputWithPast
from ..llava_arch import LlavaMetaModel, LlavaMetaForCausalLM, LlavaMetaForCausalLM_gd, LlavaMetaForCausalLM_gd_interactive
import torch
import torch.nn as nn
import transformers
12,209
# Copyright 2023 Haotian Liu # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. IGNORE_INDEX=-100 # @dataclass class DataCollatorForSupervisedDataset(object): """Collate examples for supervised fine-tuning.""" # tokenizer: transformers.PreTrainedTokenizer def __call__(self, instances,tokenizer): input_ids, labels = tuple([instance[key] for instance in instances] for key in ("input_ids", "labels")) input_ids = torch.nn.utils.rnn.pad_sequence( input_ids, batch_first=True, padding_value=tokenizer.pad_token_id) labels = torch.nn.utils.rnn.pad_sequence(labels, batch_first=True, padding_value=IGNORE_INDEX) input_ids = input_ids[:, :tokenizer.model_max_length] labels = labels[:, :tokenizer.model_max_length] batch = dict( input_ids=input_ids, labels=labels, attention_mask=input_ids.ne(tokenizer.pad_token_id), ) if 'image_clip' in instances[0]: images = [instance['image_clip'] for instance in instances] if all(x is not None and x.shape == images[0].shape for x in images): batch['images'] = torch.stack(images) else: batch['images'] = images return batch class LlavaConfig(LlamaConfig): model_type = "llava" class LlavaLlamaModel(LlavaMetaModel, LlamaModel): config_class = LlavaConfig def __init__(self, config: LlamaConfig): super(LlavaLlamaModel, self).__init__(config)
# Copyright 2023 Haotian Liu # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. IGNORE_INDEX=-100 # @dataclass class DataCollatorForSupervisedDataset(object): """Collate examples for supervised fine-tuning.""" # tokenizer: transformers.PreTrainedTokenizer def __call__(self, instances,tokenizer): input_ids, labels = tuple([instance[key] for instance in instances] for key in ("input_ids", "labels")) input_ids = torch.nn.utils.rnn.pad_sequence( input_ids, batch_first=True, padding_value=tokenizer.pad_token_id) labels = torch.nn.utils.rnn.pad_sequence(labels, batch_first=True, padding_value=IGNORE_INDEX) input_ids = input_ids[:, :tokenizer.model_max_length] labels = labels[:, :tokenizer.model_max_length] batch = dict( input_ids=input_ids, labels=labels, attention_mask=input_ids.ne(tokenizer.pad_token_id), ) if 'image_clip' in instances[0]: images = [instance['image_clip'] for instance in instances] if all(x is not None and x.shape == images[0].shape for x in images): batch['images'] = torch.stack(images) else: batch['images'] = images return batch class LlavaConfig(LlamaConfig): model_type = "llava" class LlavaLlamaModel(LlavaMetaModel, LlamaModel): config_class = LlavaConfig def __init__(self, config: LlamaConfig): super(LlavaLlamaModel, self).__init__(config)
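The cropped_code and all_code fields above define a DataCollatorForSupervisedDataset whose __call__ pads input_ids and labels to the longest example and derives the attention mask from the pad token. A hedged usage sketch, assuming that class is in scope and substituting a stub tokenizer object that carries only the attributes the collator reads (both assumptions, not part of the row):

import types
import torch

# Stub exposing only what the collator touches; real code would pass a transformers tokenizer.
tokenizer = types.SimpleNamespace(pad_token_id=0, model_max_length=2048)

# Two variable-length dummy examples, shaped like dataset items with "input_ids" and "labels".
instances = [
    {"input_ids": torch.tensor([5, 6, 7]), "labels": torch.tensor([-100, 6, 7])},
    {"input_ids": torch.tensor([5, 6]), "labels": torch.tensor([-100, 6])},
]

collator = DataCollatorForSupervisedDataset()  # class defined in the file shown above
batch = collator(instances, tokenizer)
# Both tensors are padded to the longest example; the mask is False where padding was added.
print(batch["input_ids"].shape, batch["labels"].shape, batch["attention_mask"][1])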
class LlavaLlamaForCausalLM(LlamaForCausalLM, LlavaMetaForCausalLM):
1
2023-12-04 10:59:21+00:00
16k
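Reading the row end to end: the retrieved context snippets, the import statement, and the cropped code form the model's prompt, and the single line that follows them is the completion target. How exactly these fields are concatenated is not specified by the row itself, so the template below is an assumption for illustration only, and the row literal is heavily abbreviated.

from typing import Dict, Tuple

def build_next_line_example(row: Dict) -> Tuple[str, str]:
    # Cross-file context first, then the in-file prefix; the joining template is assumed.
    context = "\n\n".join(item["snippet"] for item in row["context"])
    prompt = f"{context}\n\n{row['import_statement']}\n{row['cropped_code']}\n"
    return prompt, row["next_line"]

# Hypothetical, heavily abbreviated stand-in for the row shown above.
row = {
    "context": [{"snippet": "class LlavaMetaModel: ..."}],
    "import_statement": "import torch",
    "cropped_code": "class LlavaLlamaModel(LlavaMetaModel, LlamaModel): ...",
    "next_line": "class LlavaLlamaForCausalLM(LlamaForCausalLM, LlavaMetaForCausalLM):",
}
prompt, target = build_next_line_example(row)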