diff --git a/app.py b/app.py
index 087e9d188c76ae7bb8743a85f58e4ed853348653..c55213516ffe7d6d0ee481fc10ec7145e53ea19c 100644
--- a/app.py
+++ b/app.py
@@ -1,7 +1,5 @@
import gradio
-import sys
import os
-from pathlib import Path
from tops.config import instantiate
import gradio.inputs
os.system("pip install --upgrade pip")
@@ -9,16 +7,11 @@ os.system("pip install ftfy regex tqdm")
os.system("pip install --no-deps git+https://github.com/openai/CLIP.git")
os.system("pip install git+https://github.com/facebookresearch/detectron2@96c752ce821a3340e27edd51c28a00665dd32a30#subdirectory=projects/DensePose")
os.system("pip install --no-deps git+https://github.com/hukkelas/DSFD-Pytorch-Inference")
-sys.path.insert(0, Path(os.getcwd(), "deep_privacy"))
os.environ["TORCH_HOME"] = "torch_home"
from dp2 import utils
from gradio_demos.modules import ExampleDemo, WebcamDemo
-cfg_face = utils.load_config("deep_privacy/configs/anonymizers/face.py")
-for key in ["person_G_cfg", "cse_person_G_cfg", "face_G_cfg", "car_G_cfg"]:
- if key in cfg_face.anonymizer:
- cfg_face.anonymizer[key] = Path("deep_privacy", cfg_face.anonymizer[key])
-
+cfg_face = utils.load_config("configs/anonymizers/face.py")
anonymizer_face = instantiate(cfg_face.anonymizer, load_cache=False)
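For reviewers: the loop removed above rewrote the nested generator-config paths to live under `deep_privacy/`. With the configs moved to the repository root (see the renames below), those paths resolve as-is, so the rewriting becomes dead code. A minimal sketch of what the deleted loop did, assuming a dict-like `cfg.anonymizer` as in the removed lines:

```python
from pathlib import Path

def prefix_generator_cfgs(anonymizer_cfg: dict, prefix: str = "deep_privacy") -> None:
    # Mirrors the removed loop: rewrite nested generator-config paths so
    # they resolve relative to the old deep_privacy/ subdirectory.
    for key in ["person_G_cfg", "cse_person_G_cfg", "face_G_cfg", "car_G_cfg"]:
        if key in anonymizer_cfg:
            anonymizer_cfg[key] = str(Path(prefix, anonymizer_cfg[key]))
```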
diff --git a/deep_privacy/configs/anonymizers/FB_cse.py b/configs/anonymizers/FB_cse.py
similarity index 100%
rename from deep_privacy/configs/anonymizers/FB_cse.py
rename to configs/anonymizers/FB_cse.py
diff --git a/deep_privacy/configs/anonymizers/FB_cse_mask.py b/configs/anonymizers/FB_cse_mask.py
similarity index 100%
rename from deep_privacy/configs/anonymizers/FB_cse_mask.py
rename to configs/anonymizers/FB_cse_mask.py
diff --git a/deep_privacy/configs/anonymizers/FB_cse_mask_face.py b/configs/anonymizers/FB_cse_mask_face.py
similarity index 100%
rename from deep_privacy/configs/anonymizers/FB_cse_mask_face.py
rename to configs/anonymizers/FB_cse_mask_face.py
diff --git a/deep_privacy/configs/anonymizers/deep_privacy1.py b/configs/anonymizers/deep_privacy1.py
similarity index 100%
rename from deep_privacy/configs/anonymizers/deep_privacy1.py
rename to configs/anonymizers/deep_privacy1.py
diff --git a/deep_privacy/configs/anonymizers/face.py b/configs/anonymizers/face.py
similarity index 100%
rename from deep_privacy/configs/anonymizers/face.py
rename to configs/anonymizers/face.py
diff --git a/deep_privacy/configs/anonymizers/face_fdf128.py b/configs/anonymizers/face_fdf128.py
similarity index 100%
rename from deep_privacy/configs/anonymizers/face_fdf128.py
rename to configs/anonymizers/face_fdf128.py
diff --git a/deep_privacy/configs/anonymizers/market1501/blackout.py b/configs/anonymizers/market1501/blackout.py
similarity index 100%
rename from deep_privacy/configs/anonymizers/market1501/blackout.py
rename to configs/anonymizers/market1501/blackout.py
diff --git a/deep_privacy/configs/anonymizers/market1501/person.py b/configs/anonymizers/market1501/person.py
similarity index 100%
rename from deep_privacy/configs/anonymizers/market1501/person.py
rename to configs/anonymizers/market1501/person.py
diff --git a/deep_privacy/configs/anonymizers/market1501/pixelation16.py b/configs/anonymizers/market1501/pixelation16.py
similarity index 100%
rename from deep_privacy/configs/anonymizers/market1501/pixelation16.py
rename to configs/anonymizers/market1501/pixelation16.py
diff --git a/deep_privacy/configs/anonymizers/market1501/pixelation8.py b/configs/anonymizers/market1501/pixelation8.py
similarity index 100%
rename from deep_privacy/configs/anonymizers/market1501/pixelation8.py
rename to configs/anonymizers/market1501/pixelation8.py
diff --git a/deep_privacy/configs/datasets/coco_cse.py b/configs/datasets/coco_cse.py
similarity index 100%
rename from deep_privacy/configs/datasets/coco_cse.py
rename to configs/datasets/coco_cse.py
diff --git a/deep_privacy/configs/datasets/fdf128.py b/configs/datasets/fdf128.py
similarity index 100%
rename from deep_privacy/configs/datasets/fdf128.py
rename to configs/datasets/fdf128.py
diff --git a/deep_privacy/configs/datasets/fdf256.py b/configs/datasets/fdf256.py
similarity index 100%
rename from deep_privacy/configs/datasets/fdf256.py
rename to configs/datasets/fdf256.py
diff --git a/deep_privacy/configs/datasets/fdh.py b/configs/datasets/fdh.py
similarity index 100%
rename from deep_privacy/configs/datasets/fdh.py
rename to configs/datasets/fdh.py
diff --git a/deep_privacy/configs/datasets/utils.py b/configs/datasets/utils.py
similarity index 100%
rename from deep_privacy/configs/datasets/utils.py
rename to configs/datasets/utils.py
diff --git a/deep_privacy/configs/defaults.py b/configs/defaults.py
similarity index 100%
rename from deep_privacy/configs/defaults.py
rename to configs/defaults.py
diff --git a/deep_privacy/configs/discriminators/sg2_discriminator.py b/configs/discriminators/sg2_discriminator.py
similarity index 100%
rename from deep_privacy/configs/discriminators/sg2_discriminator.py
rename to configs/discriminators/sg2_discriminator.py
diff --git a/deep_privacy/configs/fdf/deep_privacy1.py b/configs/fdf/deep_privacy1.py
similarity index 100%
rename from deep_privacy/configs/fdf/deep_privacy1.py
rename to configs/fdf/deep_privacy1.py
diff --git a/deep_privacy/configs/fdf/stylegan.py b/configs/fdf/stylegan.py
similarity index 100%
rename from deep_privacy/configs/fdf/stylegan.py
rename to configs/fdf/stylegan.py
diff --git a/deep_privacy/configs/fdf/stylegan_fdf128.py b/configs/fdf/stylegan_fdf128.py
similarity index 100%
rename from deep_privacy/configs/fdf/stylegan_fdf128.py
rename to configs/fdf/stylegan_fdf128.py
diff --git a/deep_privacy/configs/fdh/styleganL.py b/configs/fdh/styleganL.py
similarity index 100%
rename from deep_privacy/configs/fdh/styleganL.py
rename to configs/fdh/styleganL.py
diff --git a/deep_privacy/configs/fdh/styleganL_nocse.py b/configs/fdh/styleganL_nocse.py
similarity index 100%
rename from deep_privacy/configs/fdh/styleganL_nocse.py
rename to configs/fdh/styleganL_nocse.py
diff --git a/deep_privacy/configs/generators/stylegan_unet.py b/configs/generators/stylegan_unet.py
similarity index 100%
rename from deep_privacy/configs/generators/stylegan_unet.py
rename to configs/generators/stylegan_unet.py
diff --git a/deep_privacy/.gitignore b/deep_privacy/.gitignore
deleted file mode 100644
index 273a2860e73c6593fc03885e335a6166e5093715..0000000000000000000000000000000000000000
--- a/deep_privacy/.gitignore
+++ /dev/null
@@ -1,54 +0,0 @@
-# FILES
-*.yaml
-*.pkl
-*.flist
-*.zip
-*.out
-*.npy
-*.gz
-*.ckpt
-*.pth
-*.log
-*.pyc
-*.csv
-*.yml
-*.ods
-*.ods#
-*.json
-build_docker.sh
-
-# Images / Videos
-#*.png
-#*.jpg
-*.jpeg
-*.m4a
-*.mkv
-*.mp4
-
-# Directories created by inpaintron
-.cache/
-test_examples/
-.vscode
-__pycache__
-.debug/
-**/.ipynb_checkpoints/**
-outputs/
-
-
-# From pip setup
-build/
-*.egg-info
-*.egg
-.npm/
-
-# From dockerfile
-.bash_history
-.viminfo
-.local/
-*.pickle
-*.onnx
-
-
-sbatch_files/
-figures/
-image_dump/
\ No newline at end of file
diff --git a/deep_privacy/CHANGELOG.md b/deep_privacy/CHANGELOG.md
deleted file mode 100644
index 4a6b38c88d824387d4880eb4f9b43728b8007a8f..0000000000000000000000000000000000000000
--- a/deep_privacy/CHANGELOG.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# Changelog
-
-## 23.03.2023
-- Quality of life improvements
-- Add support for refined keypoints for the FDH dataset.
-- Add FDF128 dataset loader with webdataset.
-- Support for using detector and anonymizer from DeepPrivacy1.
-- Update visualization of keypoints
-- Fix bug for upsampling/downsampling in the anonymization pipeline.
-- Support for keypoint-guided face anonymization.
-- Add ViTPose + Mask-RCNN detection model for keypoint-guided full-body anonymization.
-- Set caching of detections to False by default, as it can produce unexpected behaviour. For example, using a different score threshold requires re-running the detector.
-- Add Gradio Demos for face and body anonymization
\ No newline at end of file
diff --git a/deep_privacy/Dockerfile b/deep_privacy/Dockerfile
deleted file mode 100644
index 62dfae9c1aca7900bf4025a14962828a036de9a7..0000000000000000000000000000000000000000
--- a/deep_privacy/Dockerfile
+++ /dev/null
@@ -1,47 +0,0 @@
-FROM nvcr.io/nvidia/pytorch:22.08-py3
-ARG UID=1000
-ARG UNAME=testuser
-ARG WANDB_API_KEY
-RUN useradd -ms /bin/bash -u $UID $UNAME && \
- mkdir -p /home/${UNAME} &&\
- chown -R $UID /home/${UNAME}
-WORKDIR /home/${UNAME}
-ENV DEBIAN_FRONTEND="noninteractive"
-ENV WANDB_API_KEY=$WANDB_API_KEY
-ENV TORCH_HOME=/home/${UNAME}/.cache
-
-# OPTIONAL - DeepPrivacy2 uses these environment variables to set directories outside the current working directory
-#ENV BASE_DATASET_DIR=/work/haakohu/datasets
-#ENV BASE_OUTPUT_DIR=/work/haakohu/outputs
-#ENV FBA_METRICS_CACHE=/work/haakohu/metrics_cache
-
-RUN apt-get update && apt-get install ffmpeg libsm6 libxext6 qt5-default -y
-RUN pip install git+https://github.com/facebookresearch/detectron2@96c752ce821a3340e27edd51c28a00665dd32a30#subdirectory=projects/DensePose
-COPY setup.py setup.py
-RUN pip install \
- "numpy>=1.20" \
- matplotlib \
- cython \
- tensorboard \
- tqdm \
- ninja==1.10.2 \
- opencv-python==4.5.5.64 \
- moviepy \
- pyspng \
- git+https://github.com/hukkelas/DSFD-Pytorch-Inference \
- wandb \
- termcolor \
- git+https://github.com/hukkelas/torch_ops.git \
- git+https://github.com/wmuron/motpy@c77f85d27e371c0a298e9a88ca99292d9b9cbe6b \
- fast_pytorch_kmeans \
- einops_exts \
- einops \
- regex \
- setuptools==59.5.0 \
- resize_right==0.0.2 \
- pillow \
- scipy==1.7.1 \
- webdataset==0.2.26 \
- scikit-image \
- timm==0.6.7
-RUN pip install --no-deps torch_fidelity==0.3.0 clip@git+https://github.com/openai/CLIP.git@b46f5ac7587d2e1862f8b7b1573179d80dcdd620
\ No newline at end of file
diff --git a/deep_privacy/LICENSE b/deep_privacy/LICENSE
deleted file mode 100644
index 261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64..0000000000000000000000000000000000000000
--- a/deep_privacy/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/deep_privacy/anonymize.py b/deep_privacy/anonymize.py
deleted file mode 100644
index 626b4ef2c36ff0e850016d22c493ae7601f650b8..0000000000000000000000000000000000000000
--- a/deep_privacy/anonymize.py
+++ /dev/null
@@ -1,255 +0,0 @@
-import hashlib
-from typing import Optional
-import click
-import tops
-import numpy as np
-import tqdm
-import moviepy.editor as mp
-import cv2
-from tops.config import instantiate
-from pathlib import Path
-from PIL import Image
-from dp2 import utils
-from detectron2.data.detection_utils import _apply_exif_orientation
-from tops import logger
-from dp2.utils.bufferless_video_capture import BufferlessVideoCapture
-
-
-def show_video(video_path):
- video_cap = cv2.VideoCapture(str(video_path))
- while video_cap.isOpened():
- ret, frame = video_cap.read()
- cv2.imshow("Frame", frame)
- key = cv2.waitKey(25)
- if key == ord("q"):
- break
- video_cap.release()
- cv2.destroyAllWindows()
-
-
-class ImageIndexTracker:
-
- def __init__(self, fn) -> None:
- self.fn = fn
- self.idx = 0
-
- def fl_image(self, frame):
- self.idx += 1
- return self.fn(frame, self.idx-1)
-
-
-def anonymize_video(
- video_path, output_path: Path,
- anonymizer, visualize: bool, max_res: int,
- start_time: int, fps: int,
- end_time: int,
- visualize_detection: bool,
- track: bool,
- synthesis_kwargs,
- **kwargs):
- video = mp.VideoFileClip(str(video_path))
- if track:
- anonymizer.initialize_tracker(video.fps)
-
- def process_frame(frame, idx):
- frame = np.array(resize(Image.fromarray(frame), max_res))
- cache_id = hashlib.md5(frame).hexdigest()
- frame = utils.im2torch(frame, to_float=False, normalize=False)[0]
- cache_id_ = cache_id + str(idx)
- synthesis_kwargs["cache_id"] = cache_id_
- if visualize_detection:
- anonymized = anonymizer.visualize_detection(frame, cache_id=cache_id_)
- else:
- anonymized = anonymizer(frame, **synthesis_kwargs)
- anonymized = utils.im2numpy(anonymized)
- if visualize:
- cv2.imshow("frame", anonymized[:, :, ::-1])
- key = cv2.waitKey(1)
- if key == ord("q"):
- exit()
- return anonymized
- video: mp.VideoClip = video.subclip(start_time, end_time)
-
- if fps is not None:
- video = video.set_fps(fps)
-
- video = video.fl_image(ImageIndexTracker(process_frame).fl_image)
- if str(output_path).endswith(".avi"):
- output_path = str(output_path).replace(".avi", ".mp4")
- if not output_path.parent.exists():
- output_path.parent.mkdir(parents=True)
- video.write_videofile(str(output_path))
-
-
-def resize(frame: Image.Image, max_res):
- if max_res is None:
- return frame
- f = max(*[x/max_res for x in frame.size], 1)
- if f == 1:
- return frame
- new_shape = [int(x/f) for x in frame.size]
- return frame.resize(new_shape, resample=Image.BILINEAR)
-
-
-def anonymize_image(
- image_path, output_path: Path, visualize: bool,
- anonymizer, max_res: int,
- visualize_detection: bool,
- synthesis_kwargs,
- **kwargs):
- with Image.open(image_path) as im:
- im = _apply_exif_orientation(im)
- orig_im_mode = im.mode
-
- im = im.convert("RGB")
- im = resize(im, max_res)
- im = np.array(im)
- md5_ = hashlib.md5(im).hexdigest()
- im = utils.im2torch(np.array(im), to_float=False, normalize=False)[0]
- synthesis_kwargs["cache_id"] = md5_
- if visualize_detection:
- im_ = anonymizer.visualize_detection(tops.to_cuda(im), cache_id=md5_)
- else:
- im_ = anonymizer(im, **synthesis_kwargs)
- im_ = utils.im2numpy(im_)
- if visualize:
- while True:
- cv2.imshow("frame", im_[:, :, ::-1])
- key = cv2.waitKey(0)
- if key == ord("q"):
- break
- elif key == ord("u"):
- im_ = utils.im2numpy(anonymizer(im, **synthesis_kwargs))
- im = Image.fromarray(im_).convert(orig_im_mode)
- if output_path is not None:
- output_path.parent.mkdir(exist_ok=True, parents=True)
- im.save(output_path, optimize=False, quality=100)
- print(f"Saved to: {output_path}")
-
-
-def anonymize_file(input_path: Path, output_path: Optional[Path], **kwargs):
- if output_path is not None and output_path.is_file():
- logger.warn(f"Overwriting previous file: {output_path}")
- if tops.is_image(input_path):
- anonymize_image(input_path, output_path, **kwargs)
- elif tops.is_video(input_path):
- anonymize_video(input_path, output_path, **kwargs)
- else:
- logger.log(f"Filepath not a video or image file: {input_path}")
-
-
-def anonymize_directory(input_dir: Path, output_dir: Path, **kwargs):
- for childname in tqdm.tqdm(input_dir.iterdir()):
- childpath = input_dir.joinpath(childname.name)
- output_path = output_dir.joinpath(childname.name)
- if not childpath.is_file():
- anonymize_directory(childpath, output_path, **kwargs)
- else:
- assert childpath.is_file()
- anonymize_file(childpath, output_path, **kwargs)
-
-def anonymize_webcam(
- anonymizer, max_res: int,
- synthesis_kwargs,
- visualize_detection,
- track: bool,
- **kwargs):
- import time
- cap = BufferlessVideoCapture(0, width=1920, height=1080)
- t = time.time()
- frames = 0
- if track:
- anonymizer.initialize_tracker(fps=5) # FPS used for tracking objects
- while True:
- # Capture frame-by-frame
- ret, frame = cap.read()
- frame = Image.fromarray(frame[:, :, ::-1])
- frame = resize(frame, max_res)
- frame = np.array(frame)
- im = utils.im2torch(np.array(frame), to_float=False, normalize=False)[0]
- if visualize_detection:
- im_ = anonymizer.visualize_detection(tops.to_cuda(im))
- else:
- im_ = anonymizer(im, **synthesis_kwargs)
- im_ = utils.im2numpy(im_)
-
- frames += 1
- delta = time.time() - t
- fps = "?"
- if delta > 1e-6:
- fps = frames / delta
- print(f"FPS: {fps:.3f}", end="\r")
- cv2.imshow('frame', im_[:, :, ::-1])
- if cv2.waitKey(1) & 0xFF == ord('q'):
- break
-
-
-@click.command()
-@click.argument("config_path", type=click.Path(exists=True))
-@click.option("-i", "--input_path", help="Input path. Accepted inputs: images, videos, directories.")
-@click.option("-o", "--output_path", default=None, type=click.Path(), help="Output path to save. Can be directory or file.")
-@click.option("-v","--visualize", default=False, is_flag=True, help="Visualize the result")
-@click.option("--max-res", default=None, type=int, help="Maximum resolution of height/wideo")
-@click.option("--start-time", "--st", default=0, type=int, help="Start time (second) for vide anonymization")
-@click.option("--end-time", "--et", default=None, type=int, help="End time (second) for vide anonymization")
-@click.option("--fps", default=None, type=int, help="FPS for anonymization")
-@click.option("--detection-score-threshold", "--dst", default=.3, type=click.FloatRange(0, 1), help="Detection threshold, threshold applied for all detection models.")
-@click.option("--visualize-detection", "--vd",default=False, is_flag=True, help="Visualize only detections without running anonymization.")
-@click.option("--multi-modal-truncation", "--mt", default=False, is_flag=True, help="Enable multi-modal truncation proposed by: https://arxiv.org/pdf/2202.12211.pdf")
-@click.option("--cache", default=False, is_flag=True, help="Enable detection caching. Will save and load detections from cache.")
-@click.option("--amp", default=True, is_flag=True, help="Use automatic mixed precision for generator forward pass")
-@click.option("-t", "--truncation_value", default=0, type=click.FloatRange(0, 1), help="Latent interpolation truncation value.")
-@click.option("--track", default=False, is_flag=True, help="Track detections over frames. Will use the same latent variable (z) for tracked identities.")
-@click.option("--seed", default=0, type=int, help="Set random seed for generating images.")
-@click.option("--person-generator", default=None, help="Config path to unconditional person generator", type=click.Path())
-@click.option("--cse-person-generator", default=None, help="Config path to CSE-guided person generator", type=click.Path())
-@click.option("--webcam", default=False, is_flag=True, help="Read image from webcam feed.")
-def anonymize_path(
- config_path,
- input_path,
- output_path,
- detection_score_threshold: float,
- visualize_detection: bool,
- cache: bool,
- seed: int,
- person_generator: str,
- cse_person_generator: str,
- webcam: bool,
- **kwargs):
- """
- config_path: Specify the path to the anonymization model to use.
- """
- tops.set_seed(seed)
- cfg = utils.load_config(config_path)
- if person_generator is not None:
- cfg.anonymizer.person_G_cfg = person_generator
- if cse_person_generator is not None:
- cfg.anonymizer.cse_person_G_cfg = cse_person_generator
- cfg.detector.score_threshold = detection_score_threshold
- utils.print_config(cfg)
-
- anonymizer = instantiate(cfg.anonymizer, load_cache=cache)
- synthesis_kwargs = ["amp", "multi_modal_truncation", "truncation_value"]
- synthesis_kwargs = {k: kwargs.pop(k) for k in synthesis_kwargs}
-
- kwargs["anonymizer"] = anonymizer
- kwargs["visualize_detection"] = visualize_detection
- kwargs["synthesis_kwargs"] = synthesis_kwargs
- if webcam:
- anonymize_webcam(**kwargs)
- return
- input_path = Path(input_path)
- output_path = Path(output_path) if output_path is not None else None
- if output_path is None and not kwargs["visualize"]:
- logger.log("Output path not set. Setting visualize to True")
- kwargs["visualize"] = True
- if input_path.is_dir():
- assert output_path is None or not output_path.is_file()
- anonymize_directory(input_path, output_path, **kwargs)
- else:
- anonymize_file(input_path, output_path, **kwargs)
-
-
-if __name__ == "__main__":
-
- anonymize_path()
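A note on the detection cache in the deleted `anonymize.py`: both `anonymize_image` and the video path key the cache on an MD5 hash of the raw pixels, with the frame index appended for videos. A self-contained sketch of that scheme (hypothetical helper name, no `dp2` imports):

```python
import hashlib
from typing import Optional

import numpy as np

def frame_cache_id(frame: np.ndarray, frame_idx: Optional[int] = None) -> str:
    # Hash the raw pixel buffer; append the frame index for videos so each
    # frame gets its own cache entry even if pixels repeat.
    cache_id = hashlib.md5(frame).hexdigest()
    return cache_id if frame_idx is None else cache_id + str(frame_idx)

frame = np.zeros((8, 8, 3), dtype=np.uint8)
assert frame_cache_id(frame) == frame_cache_id(frame.copy())
assert frame_cache_id(frame, 0) != frame_cache_id(frame, 1)
```

This also explains the CHANGELOG note above about caching: the key does not include detector settings, so changing e.g. the score threshold requires re-running detection rather than reusing the cache.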
diff --git a/deep_privacy/attribute_guided_demo.py b/deep_privacy/attribute_guided_demo.py
deleted file mode 100644
index e47e23d6c6e5d814b589c61be27df7ae750b1d51..0000000000000000000000000000000000000000
--- a/deep_privacy/attribute_guided_demo.py
+++ /dev/null
@@ -1,144 +0,0 @@
-from collections import defaultdict
-import gradio
-import numpy as np
-import torch
-import cv2
-from PIL import Image
-from dp2 import utils
-from tops.config import instantiate
-import tops
-import gradio.inputs
-from stylemc import get_and_cache_direction, get_styles
-
-
-class GuidedDemo:
- def __init__(self, face_anonymizer, cfg_face) -> None:
- self.anonymizer = face_anonymizer
- assert sum([x is not None for x in list(face_anonymizer.generators.values())]) == 1
- self.generator = [x for x in list(face_anonymizer.generators.values()) if x is not None][0]
- face_G_cfg = utils.load_config(cfg_face.anonymizer.face_G_cfg)
- face_G_cfg.train.batch_size = 1
- self.dl = instantiate(face_G_cfg.data.val.loader)
- self.cache_dir = face_G_cfg.output_dir
- self.precompute_edits()
-
- def precompute_edits(self):
- self.precomputed_edits = set()
- for edit in self.precomputed_edits:
- get_and_cache_direction(self.cache_dir, self.dl, self.generator, edit)
- if self.cache_dir.joinpath("stylemc_cache").is_dir():
- for path in self.cache_dir.joinpath("stylemc_cache").iterdir():
- text_prompt = path.stem.replace("_", " ")
- self.precomputed_edits.add(text_prompt)
- print(text_prompt)
- self.edits = defaultdict(defaultdict)
-
- def anonymize(self, img, show_boxes: bool, current_box_idx: int, current_styles, current_boxes, update_identity, edits, cache_id=None):
- if not isinstance(img, torch.Tensor):
- img, cache_id = pil2torch(img)
- img = tops.to_cuda(img)
-
- current_box_idx = current_box_idx % len(current_boxes)
- edited_styles = [s.clone() for s in current_styles]
- for face_idx, face_edits in edits.items():
- for prompt, strength in face_edits.items():
- direction = get_and_cache_direction(self.cache_dir, self.dl, self.generator, prompt)
- edited_styles[int(face_idx)] += direction * strength
- update_identity[int(face_idx)] = True
- assert img.dtype == torch.uint8
- img = self.anonymizer(
- img, truncation_value=0,
- multi_modal_truncation=True, amp=True,
- cache_id=cache_id,
- all_styles=edited_styles,
- update_identity=update_identity)
- update_identity = [True for i in range(len(update_identity))]
- img = utils.im2numpy(img)
- if show_boxes:
- x0, y0, x1, y1 = [int(_) for _ in current_boxes[int(current_box_idx)]]
- img = cv2.rectangle(img, (x0, y0), (x1, y1), (255, 0, 0), 1)
- return img, update_identity
-
- def update_image(self, img, show_boxes):
- img, cache_id = pil2torch(img)
- img = tops.to_cuda(img)
- det = self.anonymizer.detector.forward_and_cache(img, cache_id, load_cache=True)[0]
- current_styles = []
- for i in range(len(det)):
- # Need to do forward pass to register all affine modules.
- batch = det.get_crop(i, img)
- batch["condition"] = batch["img"].float()
-
- s = get_styles(
- np.random.randint(0, 999999), self.generator,
- batch, truncation_value=0)
- current_styles.append(s)
- update_identity = [True for i in range(len(det))]
- current_boxes = np.array(det.boxes)
- edits = defaultdict(defaultdict)
- cur_face_idx = -1 % len(current_boxes)
- img, update_identity = self.anonymize(img, show_boxes, cur_face_idx, current_styles, current_boxes, update_identity, edits, cache_id=cache_id)
- return img, current_styles, current_boxes, update_identity, edits, cur_face_idx
-
- def change_face(self, change, cur_face_idx, current_boxes, input_image, show_boxes, current_styles, update_identity, edits):
- cur_face_idx = (cur_face_idx+change) % len(current_boxes)
- img, update_identity = self.anonymize(input_image, show_boxes, cur_face_idx, current_styles, current_boxes, update_identity, edits)
- return img, update_identity, cur_face_idx
-
- def add_style(self, face_idx: int, prompt: str, strength: float, input_image, show_boxes, current_styles, current_boxes, update_identity, edits):
- face_idx = face_idx % len(current_boxes)
- edits[face_idx][prompt] = strength
- img, update_identity = self.anonymize(input_image, show_boxes, face_idx, current_styles, current_boxes, update_identity, edits)
- return img, update_identity, edits
-
- def setup_interface(self):
- current_styles = gradio.State()
- current_boxes = gradio.State(None)
- update_identity = gradio.State([])
- edits = gradio.State([])
- with gradio.Row():
- input_image = gradio.Image(
- type="pil", label="Upload your image or try the example below!",source="webcam")
- output_image = gradio.Image(type="numpy", label="Output")
- with gradio.Row():
- update_btn = gradio.Button("Update Anonymization").style(full_width=True)
- with gradio.Row():
- show_boxes = gradio.Checkbox(value=True, label="Show Selected")
- cur_face_idx = gradio.Number(value=-1, label="Current", interactive=False)
- previous = gradio.Button("Previous Person")
- next_ = gradio.Button("Next Person")
- with gradio.Row():
- text_prompt = gradio.Textbox(
- placeholder=" | ".join(list(self.precomputed_edits)),
- label="Text Prompt for Edit")
- edit_strength = gradio.Slider(0, 5, step=.01)
- add_btn = gradio.Button("Add Edit")
- add_btn.click(self.add_style, inputs=[cur_face_idx, text_prompt, edit_strength, input_image, show_boxes, current_styles, current_boxes, update_identity, edits], outputs=[output_image, update_identity, edits])
- update_btn.click(self.update_image, inputs=[input_image, show_boxes], outputs=[output_image, current_styles, current_boxes, update_identity, edits, cur_face_idx])
- input_image.change(self.update_image, inputs=[input_image, show_boxes], outputs=[output_image, current_styles, current_boxes, update_identity, edits, cur_face_idx])
- previous.click(self.change_face, inputs=[gradio.State(-1), cur_face_idx, current_boxes, input_image, show_boxes, current_styles, update_identity, edits], outputs=[output_image, update_identity, cur_face_idx])
- next_.click(self.change_face, inputs=[gradio.State(1), cur_face_idx, current_boxes, input_image, show_boxes, current_styles, update_identity, edits], outputs=[output_image, update_identity, cur_face_idx])
-
- show_boxes.change(self.anonymize, inputs=[input_image, show_boxes, cur_face_idx, current_styles, current_boxes, update_identity, edits], outputs=[output_image, update_identity])
-
-
-def pil2torch(img: Image.Image):
- img = img.convert("RGB")
- img = np.array(img)
- img = np.rollaxis(img, 2)
- return torch.from_numpy(img), None
-
-
-cfg_face = utils.load_config("configs/anonymizers/face.py")
-anonymizer_face = instantiate(cfg_face.anonymizer, load_cache=False)
-anonymizer_face.initialize_tracker(fps=1)
-
-
-with gradio.Blocks() as demo:
- gradio.Markdown("#
DeepPrivacy2 - Realistic Image Anonymization ")
- gradio.Markdown("### Håkon Hukkelås, Rudolf Mester, Frank Lindseth ")
- with gradio.Tab("Text-Guided Anonymization"):
- GuidedDemo(anonymizer_face, cfg_face).setup_interface()
-
-
-demo.launch()
\ No newline at end of file
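`pil2torch` above produces the CHW uint8 tensor layout the anonymizer expects (`np.rollaxis(img, 2)` moves the channel axis first). A standalone sketch of the conversion and its inverse, useful for checking shapes outside the demo:

```python
import numpy as np
import torch
from PIL import Image

def pil_to_chw(img: Image.Image) -> torch.Tensor:
    arr = np.array(img.convert("RGB"))            # (H, W, 3), uint8
    return torch.from_numpy(np.rollaxis(arr, 2))  # (3, H, W), uint8

def chw_to_pil(t: torch.Tensor) -> Image.Image:
    return Image.fromarray(t.permute(1, 2, 0).contiguous().numpy())

img = Image.new("RGB", (4, 2))  # PIL size is (width, height)
assert pil_to_chw(img).shape == (3, 2, 4)
```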
diff --git a/deep_privacy/readme.md b/deep_privacy/readme.md
deleted file mode 100644
index 2f01a194a34823642a7a653f68f345ac11df3a96..0000000000000000000000000000000000000000
--- a/deep_privacy/readme.md
+++ /dev/null
@@ -1,209 +0,0 @@
-# DeepPrivacy2 - A Toolbox for Realistic Image Anonymization
-[[PDF]](http://arxiv.org/abs/2211.09454) [[Hugging Face Demo]](https://huggingface.co/spaces/haakohu/deep_privacy2)
-[[Video Demo]](https://youtu.be/Kt3au719hhk)
-[[WACV 2023 Presentation]](https://youtu.be/wwKRkkzxKuM)
-
-
-DeepPrivacy2 is a toolbox for realistic anonymization of humans, including a face and a full-body anonymizer.
-
-
-
-DeepPrivacy2 detects and anonymizes individuals via three detection and synthesis networks: (1) a CSE-guided generator for individuals detected with dense pose (by CSE), (2) an unconditional full-body generator for cases where CSE fails to detect (note the segmented persons without color-coded CSE detections), and (3) a face generator for the remaining individuals (marked in red).
-
-
-## What's new
-
-This repository improves over the original [DeepPrivacy](https://github.com/hukkelas/DeepPrivacy) repository with the following new features:
-- **Full-body anonymization:** Anonymize the entire human body with a single generator
-- **Improved Face Anonymization:** Improved quality and higher resolution (256x256 vs. 128x128) face anonymization without relying on facial landmark detection.
-- **Attribute-Guided Anonymization:** Anonymize faces guided by text prompts using [StyleMC](https://github.com/catlab-team/stylemc).
-- **Code cleanup and general improvements:** Extensive refactoring, bugfixes, and improvements yielding improved results and faster training.
-
-## Installation
-### Requirements
-- Pytorch >= 1.10
-- Torchvision >= 0.12
-- Python >= 3.8
-- CUDA-capable device for training. Training was done with 1-8 32GB V100 GPUs.
-
-
-### Installation
-We recommend setting up and installing PyTorch with [anaconda](https://www.anaconda.com/) following the [pytorch installation instructions](https://pytorch.org/get-started/locally/).
-
-1. Clone repository: `git clone https://github.com/hukkelas/deep_privacy2/`.
-2. Install using `setup.py`:
-```
-pip install -e .
-```
-or:
-```
-pip install git+https://github.com/hukkelas/deep_privacy2/
-```
-
-### Installation with Docker
-
-1. Install [nvidia-docker](https://github.com/NVIDIA/nvidia-docker) to support GPU acceleration.
-2. Build the docker image using the [Dockerfile](Dockerfile).
-```bash
-# If you're not planning to train the network (or not use wandb logging), you can remove the WANDB_API_KEY argument.
-docker build -t deep_privacy2 --build-arg WANDB_API_KEY=YOUR_WANDB_KEY --build-arg UID=$(id -u) --build-arg UNAME=$(id -un) .
-```
-3. Run the docker image with selected command:
-```
-docker run --runtime=nvidia --gpus '"device=0"' --name deep_privacy2 --ipc=host -u $(id -u) -v $PWD:/home/$(id -un) --rm deep_privacy2 python3 anonymize.py configs/anonymizers/deep_privacy1.py -i media/regjeringen.jpg -o output.png
-```
-
-
-## Anonymization
-[anonymize.py](anonymize.py) is the main script for anonymization.
-
-The typical usage is
-```
-python3 anonymize.py configs/anonymizers/FB_cse.py -i path_to_image.png
-```
-where the first argument is the chosen anonymizer (see below for the different models) and the second a path to an image/folder/video.
-
-There are several optional arguments, see `python3 anonymize.py --help` for more info.
-```
-python3 anonymize.py -h
-Usage: anonymize.py [OPTIONS] CONFIG_PATH
-
- config_path: Specify the path to the anonymization model to use.
-
-Options:
- -i, --input_path PATH Input path. Accepted inputs: images, videos,
- directories.
- -o, --output_path PATH Output path to save. Can be directory or
- file.
- --visualize Visualize the result
- --max-res INTEGER Maximum resolution of height/width
- --start-time, --st INTEGER Start time (second) for video anonymization
- --end-time, --et INTEGER End time (second) for video anonymization
- --fps INTEGER FPS for anonymization
- --detection-score-threshold FLOAT RANGE
- Detection threshold, threshold applied for
- all detection models. [0<=x<=1]
- --visualize-detection Visualize only detections without running
- anonymization.
- --multi-modal-truncation, --mt Enable multi-modal truncation proposed by:
- https://arxiv.org/pdf/2202.12211.pdf
- --cache Enable detection caching. Will save and
- load detections from cache.
- --amp Use automatic mixed precision for generator
- forward pass
- -t, --truncation_value FLOAT RANGE
- Latent interpolation truncation value.
- [0<=x<=1]
- --track Track detections over frames. Will use the
- same latent variable (z) for tracked
- identities.
- --seed INTEGER Set random seed for generating images.
- --person-generator PATH Config path to unconditional person
- generator
- --cse-person-generator PATH Config path to CSE-guided person generator
- --webcam Read image from webcam feed.
- --help Show this message and exit.
-
-```
-
-**Single image anonymization**
-```
-python3 anonymize.py configs/anonymizers/FB_cse.py -i path_to_image.png --output_path output.png
-```
-
-**Folder anonymization**
-
-If a folder is given as the input, all image and video files in the given folder will be anonymized and placed under --output_path. The script will duplicate the directory structure/filenames in the given folder for the output.
-```
-python3 anonymize.py configs/anonymizers/FB_cse.py -i path/to/input/folder --output_path output_folder
-```
-
-**Video anonymization**
-```
-python3 anonymize.py configs/anonymizers/FB_cse.py -i path_to_video.mp4 --output_path output.mp4
-```
-
-**Webcam anonymization**
-```
-python3 anonymize.py configs/anonymizers/FB_cse.py --webcam
-```
-
-### Available anonymization models
-DeepPrivacy2 provides the following anonymization models:
-
-- [`configs/anonymizers/FB_cse.py`](configs/anonymizers/FB_cse.py): Full-body anonymizer that only anonymizes individuals detected by CSE. This provides the highest quality anonymization, however, some individuals might not be detected by CSE.
-- [`configs/anonymizers/FB_cse_mask.py`](configs/anonymizers/FB_cse_mask.py): Full-body anonymizer that anonymizes all individuals detected by CSE or Mask R-CNN. In contrast to `configs/anonymizers/FB_cse.py`, this model anonymizes individuals not detected by CSE with an unguided generator.
-- [`configs/anonymizers/FB_cse_mask_face.py`](configs/anonymizers/FB_cse_mask_face.py): Full-body and face anonymizer that anonymizes all individuals detected by CSE, Mask R-CNN or by face detection. Compared to `configs/anonymizers/FB_cse_mask.py`, this model anonymizes individuals not detected by CSE or Mask R-CNN with a face anonymizer.
-- [`configs/anonymizers/face.py`](configs/anonymizers/face.py): The face anonymizer only anonymizes a center crop of the face.
-- [`configs/anonymizers/face_fdf128.py`](configs/anonymizers/face_fdf128.py): Same as [`configs/anonymizers/face.py`](configs/anonymizers/face.py), but the generator is trained on lower-resolution images (128x128 or lower). Recommended if you will not anonymize any faces larger than 128x128. **Model will be released soon.**
-
-## Attribute guided anonymization
-DeepPrivacy2 allows for controllable anonymization through text prompts by adapting [StyleMC](https://github.com/catlab-team/stylemc).
-StyleMC finds global semantically meaningful directions in the GAN latent space by manipulating images towards a given text prompt with a [CLIP](https://github.com/openai/CLIP)-based loss.
-
-
-The repository includes a Gradio demo for interactive text-guided anonymization.
-To use the demo, first:
-
-1. Download the FDF256 dataset (see below). Only the validation set is required.
-2. Run the following:
-```
-python3 attribute_guided_demo.py
-```
-
-The script will spin up a local webserver.
-
-
-## Training
-First, download dataset for training (see below).
-
-To start training, type the following:
-```
-python3 train.py configs/fdh/styleganL.py
-```
-The training automatically logs to [wandb](https://wandb.ai/).
-
-### Model development utility scripts
-**Dataset inspection:** To inspect the training dataset, you can use:
-```
-python3 -m tools.inspect_dataset configs/fdh/styleganL.py
-```
-
-**Sanity check:**
-```
-python3 -m tools.dryrun configs/fdh/styleganL.py
-```
-
-**Output visualization:** To visualize output of trained models:
-```
-python3 -m tools.show_examples configs/fdh/styleganL.py
-```
-
-
-#### Calculating metrics
-```
-python3 validate.py configs/fdh/styleganL.py
-```
-**NOTE:** The metrics calculated with validate.py will slightly differ from training metrics, as validate.py disables automatic mixed precision.
-
-
-## Dataset Setup
-
-**Setting Data directory:**
-The default dataset directory is ./data. If you want to change the dataset directory, set the environment variable `BASE_DATASET_DIR`. For example, `export BASE_DATASET_DIR=/work/data/`.
-
-
-### FDF256
-Follow the instructions [here](https://github.com/hukkelas/FDF/blob/master/FDF256.md) to download the FDF256 dataset. The dataset should be placed in the directory: `data/fdf256`.
-
-### FDH
-Follow the instructions [here](https://www.github.com/hukkelas/FDH) to download the FDH dataset. The dataset should be placed in the directory: `data/fdh`.
-
-
-## License
-This repository is released under the [Apache 2.0 License](License), except for the following:
-
-- Code under `sg3_torch_utils/`. This code is modified from [github.com/NVlabs/stylegan2-ada-pytorch](https://github.com/NVlabs/stylegan2-ada-pytorch). Separate license is attached in the directory.
-- Detection network: See [Detectron2 License](https://github.com/facebookresearch/detectron2/blob/main/LICENSE).
-- All checkpoints follow the license of the datasets. See the respective datasets for more information.
-- Code under `dp2/detection/models/vit_pose`. This code is modified from [https://github.com/gpastal24/ViTPose-Pytorch](https://github.com/gpastal24/ViTPose-Pytorch), where code is adapted from OpenMMLab. Original license is [Apache 2.0](https://github.com/open-mmlab/mmpose/blob/master/LICENSE).
diff --git a/deep_privacy/setup.py b/deep_privacy/setup.py
deleted file mode 100644
index 3cc1ae0c71ac8c44c7e662f14e9a4e0d1d7dcf69..0000000000000000000000000000000000000000
--- a/deep_privacy/setup.py
+++ /dev/null
@@ -1,46 +0,0 @@
-import torch
-import torchvision
-from setuptools import setup, find_packages
-
-torch_ver = [int(x) for x in torch.__version__.split(".")[:2]]
-assert torch_ver >= [1, 9], "Requires PyTorch >= 1.9"
-torchvision_ver = [int(x) for x in torchvision.__version__.split(".")[:2]]
-assert torchvision_ver >= [0, 11], "Requires torchvision >= 0.11"
-
-setup(
- name="dp2",
- version="0.1.0",
- packages=find_packages(),
- install_requires=[
- "numpy>=1.20",
- "cython",
- "matplotlib",
- "tqdm",
- "tensorboard",
- "opencv-python",
- "detectron2-densepose@git+https://github.com/facebookresearch/detectron2@96c752ce821a3340e27edd51c28a00665dd32a30#subdirectory=projects/DensePose",
- "torch_fidelity==0.3.0",
- "ninja==1.10.2",
- "moviepy",
- "pyspng",
- "face_detection@git+https://github.com/hukkelas/DSFD-Pytorch-Inference",
- "wandb",
- "termcolor",
- "tops@git+https://github.com/hukkelas/torch_ops.git",
- "motpy@git+https://github.com/wmuron/motpy@c77f85d27e371c0a298e9a88ca99292d9b9cbe6b",
- "fast_pytorch_kmeans",
- "einops",
- "einops_exts",
- "regex",
- "setuptools==59.5.0",
- "resize_right==0.0.2",
- "pillow==8.3.1",
- "scipy==1.7.1",
- "webdataset==0.2.26",
- "scikit-image",
- "imageio==2.4.1",
- "timm==0.6.7",
- "clip@git+https://github.com/openai/CLIP.git@b46f5ac7587d2e1862f8b7b1573179d80dcdd620",
-
- ],
-)
diff --git a/deep_privacy/stylemc.py b/deep_privacy/stylemc.py
deleted file mode 100644
index 96da068367243b1ec9fbf70f015083ae7a138a33..0000000000000000000000000000000000000000
--- a/deep_privacy/stylemc.py
+++ /dev/null
@@ -1,180 +0,0 @@
-"""
-Approach: "StyleMC: Multi-Channel Based Fast Text-Guided Image Generation and Manipulation"
-Original source code:
-https://github.com/autonomousvision/stylegan_xl/blob/f9be58e98110bd946fcdadef2aac8345466faaf3/run_stylemc.py#
-Modified by Håkon Hukkelås
-"""
-import click
-from pathlib import Path
-import tqdm
-from dp2 import utils
-import tops
-from timeit import default_timer as timer
-import torch
-import torch.nn.functional as F
-from torchvision.transforms.functional import resize, normalize
-import clip
-from dp2.gan_trainer import AverageMeter
-from tops.config import instantiate
-from dp2.utils import vis_utils
-
-
-def spherical_dist_loss(x, y):
- x = F.normalize(x, dim=-1)
- y = F.normalize(y, dim=-1)
- return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
-
-
-def prompts_dist_loss(x, targets):
- loss = spherical_dist_loss
- if len(targets) == 1:
- return loss(x, targets[0])
- distances = [loss(x, target) for target in targets]
- return torch.stack(distances, dim=-1).sum(dim=-1)
-
-affine_modules = None
-max_ch = None
-
-@torch.no_grad()
-def init_affine_modules(G, batch):
- global affine_modules, max_ch
- affine_modules = []
- max_ch = 0
- def forward_hook(block, input_, output_):
- global max_ch
- affine_modules.append(block)
- max_ch = max(max_ch, block.affine.out_features*(1+hasattr(block, "affine_beta")))
- removable_handles = []
- for block in G.modules():
- if hasattr(block, "affine") and hasattr(block.affine, "weight"):
- removable_handles.append(block.register_forward_hook(forward_hook))
- G(**batch)
- for hook in removable_handles:
- hook.remove()
-
-@torch.no_grad()
-def get_styles(seed, G: torch.nn.Module, batch, truncation_value=1):
- global affine_modules, max_ch
- if affine_modules is None:
- init_affine_modules(G, batch)
- w = G.style_net.get_truncated(truncation_value, **batch, seed=seed)
-
- all_styles = torch.zeros((len(affine_modules), max_ch), device=batch["img"].device, dtype=torch.float32)
- for i, block in enumerate(affine_modules):
- gamma0 = block.affine(w)
- if hasattr(block, "affine_beta"):
- beta0 = block.affine_beta(w)
- gamma0 = torch.cat((gamma0, beta0), dim=1)
- all_styles[i] = F.pad(gamma0, ((0, max_ch - gamma0.shape[-1])), "constant", 0)
-
- return all_styles
-
-
-def get_and_cache_direction(output_dir: Path, dl_val, G, text_prompt):
- cache_path = output_dir.joinpath(
- "stylemc_cache", text_prompt.replace(" ", "_") + ".torch")
- if cache_path.is_file():
- print("Loaded cache from:", cache_path)
- return torch.load(cache_path)
- direction = find_direction(G, text_prompt, dl_val=iter(dl_val))
- cache_path.parent.mkdir(exist_ok=True, parents=True)
- torch.save(direction, cache_path)
- return direction
-
-
-@torch.cuda.amp.autocast()
-def find_direction(
- G,
- text_prompt,
- n_iterations=128*8*10,
- batch_size=8,
- dl_val=None
-):
- time_start = timer()
- clip_model = clip.load("ViT-B/16", device=tops.get_device())[0]
- target = [clip_model.encode_text(clip.tokenize(text_prompt).to(tops.get_device())).float()]
- first_batch = next(dl_val)
- first_batch["embedding"] = None if "embedding" not in first_batch else first_batch["embedding"]
- s = get_styles(0, G, first_batch, 0)
- # stats tracker
- tracker = AverageMeter()
- n_iterations = n_iterations // batch_size
-
- # initialize styles direction
- direction = torch.zeros(s.shape, device=tops.get_device())
- direction.requires_grad_()
- utils.set_requires_grad(G, False)
- direction_tracker = torch.zeros_like(direction)
- opt = torch.optim.AdamW([direction], lr=0.05, betas=(0., 0.999), weight_decay=0.25)
-
- grads = []
- for seed_idx in tqdm.trange(n_iterations):
- # forward pass through synthesis network with new styles
- if seed_idx == 0:
- batch = first_batch
- else:
- batch = next(dl_val)
- batch["embedding"] = None if "embedding" not in batch else batch["embedding"]
- styles = get_styles(seed_idx, G, batch) + direction
- img = G(**batch, s=iter(styles))["img"]
-
- # clip loss
- img = (img + 1)/2
- img = normalize(img, mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711))
- img = resize(img, (224, 224))
- embeds = clip_model.encode_image(img)
- cos_sim = prompts_dist_loss(embeds, target)
- cos_sim.backward(retain_graph=True)
- # track stats
- tracker.update(dict(cos_sim=cos_sim, norm=torch.norm(direction)))
- if not (seed_idx % batch_size):
- opt.step()
- grads.append(direction.grad.clone())
- direction.grad.data.zero_()
- print(tracker.get_average())
- tracker = AverageMeter()
-
- # throw out fluctuating channels
- direction = direction.detach()
- direction[direction_tracker > n_iterations / 4] = 0
- print(direction)
- print(f"Time for direction search: {timer() - time_start:.2f} s")
- return direction
-
-
-@click.command()
-@click.argument("config_path")
-@click.argument("text_prompt")
-@click.option("-n", default=50, type=int)
-def main(config_path: str, text_prompt: str, n: int):
- from dp2.infer import build_trained_generator
- from PIL import Image
- cfg = utils.load_config(config_path)
- G = build_trained_generator(cfg)
- cfg.train.batch_size = 1
- dl_val = instantiate(cfg.data.val.loader)
- direction = get_and_cache_direction(cfg.output_dir, dl_val, G, text_prompt)
- output_dir = Path("stylemc_results")
- output_dir.mkdir(exist_ok=True, parents=True)
- save = lambda x, path: Image.fromarray(utils.im2numpy(x, True, True)[0]).save(path)
- strengths = [0, 0.05, 0.1, 0.2, 0.3, 0.4, 1.0]
- for i, batch in enumerate(iter(dl_val)):
- imgs = []
-
- img = vis_utils.visualize_batch(**batch)
- img = tops.im2numpy(img, False)[0]
- imgs.append(img)
- if i > n:
- break
- for strength in strengths:
- styles = get_styles(i, G, batch, truncation_value=0) + direction*strength
- img = G(**batch, s=iter(styles))["img"]
- imgs.append(utils.im2numpy(img, True, True)[0])
-
- img = tops.np_make_image_grid(imgs, nrow=1)
- Image.fromarray(img).save(output_dir.joinpath(f"results_{i}.png"))
-
-
-if __name__ == "__main__":
- main()
-
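A side note on `spherical_dist_loss` above: after normalization, `(x - y).norm()` is the chord length between unit vectors, which is `2*sin(theta/2)` for angle `theta`, so the expression `2*arcsin(||x - y||/2)**2` equals `theta**2 / 2` — half the squared geodesic distance on the unit sphere. A quick standalone check, duplicating the deleted function:

```python
import math

import torch
import torch.nn.functional as F

def spherical_dist_loss(x, y):
    # Copied from the deleted stylemc.py above.
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)

# Orthogonal unit vectors: theta = pi/2, so the loss is (pi/2)**2 / 2.
x = torch.tensor([[1.0, 0.0]])
y = torch.tensor([[0.0, 1.0]])
expected = torch.tensor([(math.pi / 2) ** 2 / 2])
assert torch.allclose(spherical_dist_loss(x, y), expected)
```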
diff --git a/deep_privacy/tools/__init__.py b/deep_privacy/tools/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/deep_privacy/tools/compute_cluster_means.py b/deep_privacy/tools/compute_cluster_means.py
deleted file mode 100644
index 1199b4403c3c6caa5495aaa40b80176cd3e901b0..0000000000000000000000000000000000000000
--- a/deep_privacy/tools/compute_cluster_means.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Compute k-means cluster for W (Self-Distilled StyleGAN: Towards Generation from Internet Photos)
-# pip install fast-pytorch-kmeans
-import click
-import tqdm
-import torch
-from dp2.utils import load_config
-from dp2.infer import build_trained_generator
-import tops
-from tops.checkpointer.checkpointer import get_ckpt_paths, load_checkpoint
-from fast_pytorch_kmeans import KMeans
-
-
-@click.command()
-@click.argument("config_path")
-@click.option("-n", "--n_samples", default=int(600e3), type=int)
-@click.option( "--n_centers", "--nc", default=512, type=int)
-@click.option( "--batch_size", default=512, type=int)
-def compute_cluster_means(config_path, n_samples, n_centers, batch_size):
- cfg = load_config(config_path)
- G = build_trained_generator(cfg, map_location=torch.device("cpu"))
- n_batches = n_samples // batch_size
- n_samples = n_samples // batch_size * batch_size
- print("Computing clusters over", n_samples, "samples.")
- style_net = G.stylenet if hasattr(G, "stylenet") else G.style_net
- style_net = tops.to_cuda(style_net)
- w_dim = style_net.w_dim
- z_dim = style_net.z_dim
- with torch.inference_mode():
- w = torch.zeros((n_samples, w_dim), device=tops.get_device(), dtype=torch.float32)
-
- for i in tqdm.trange(n_batches):
- w[i*batch_size:(i+1)*batch_size] = style_net(torch.randn((batch_size, z_dim), device=tops.get_device())).cpu()
- kmeans = KMeans(n_clusters=n_centers, mode='euclidean', verbose=10, max_iter=1000, tol=0.00001)
-
- kmeans.fit_predict(w)
- centers = kmeans.centroids
-
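- # Register the cluster centers as a buffer so they are included in the state_dict.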
- if hasattr(style_net, "w_centers"):
- del style_net.w_centers
- style_net.register_buffer("w_centers", centers)
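- # Write the updated generator weights (now holding w_centers) back into the latest checkpoint.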
- ckpt_path = get_ckpt_paths(cfg.checkpoint_dir)[-1]
- ckpt = load_checkpoint(ckpt_path, map_location="cpu")
- ckpt["EMA_generator"] = G.state_dict()
- torch.save(ckpt, ckpt_path)
-
-if __name__ == "__main__":
- compute_cluster_means()
-
\ No newline at end of file
diff --git a/deep_privacy/tools/dryrun.py b/deep_privacy/tools/dryrun.py
deleted file mode 100644
index 5392273c839d702574d1e33d554999504339ab34..0000000000000000000000000000000000000000
--- a/deep_privacy/tools/dryrun.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import click
-import torch
-import tops
-from tops.config import instantiate
-from dp2 import utils
-
-@click.command()
-@click.argument("config_path")
-def run(config_path):
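- # Dry-run a config: build G and D, run one forward/backward pass, and verify every trainable parameter receives a gradient.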
- cfg = utils.load_config(config_path)
- utils.print_config(cfg)
-
- G = tops.to_cuda(instantiate(cfg.generator))
-
- D = tops.to_cuda(instantiate(cfg.discriminator))
- cfg.train.batch_size = 2
- print(G)
- dl_val = instantiate(cfg.data.val.loader)
- cfg.train.amp.scaler_D.init_scale = 1
- scaler = instantiate(cfg.train.amp.scaler_D)
- loss_fnc = instantiate(cfg.loss_fnc, D=D, G=G)
- batch = next(iter(dl_val))
- tops.print_module_summary(G, batch, max_nesting=10)
-# tops.print_module_summary(D, batch, max_nesting=10)
-
- print("G PARAMS:", tops.num_parameters(G) / 10 ** 6)
- print("D PARAMS:", tops.num_parameters(D) / 10 ** 6)
- print(f"Number of trainable parameters in D: {sum(p.numel() for p in D.parameters() if p.requires_grad)/10**6}M")
- print(f"Number of trainable parameters in G: {sum(p.numel() for p in G.parameters() if p.requires_grad)/10**6}M" )
-
- with torch.cuda.amp.autocast(True):
- o_G = G(**batch)
- o_D = D(**batch)
- print("FORWARD OK")
- D_loss, to_log = loss_fnc.D_loss(batch, grad_scaler=scaler)
- D_loss.backward()
- assert all([p.grad is not None or not p.requires_grad for p in D.parameters()])
- print(to_log)
-
- G_loss, _ = loss_fnc.G_loss(batch, grad_scaler=scaler)
- G_loss.backward()
- G: torch.nn.Module = G
- for name, p in G.named_parameters():
- if p.grad is None and p.requires_grad:
- print(name)
- assert all([p.grad is not None or not p.requires_grad for p in G.parameters()])
-
-if __name__ == "__main__":
- run()
diff --git a/deep_privacy/tools/inspect_dataset.py b/deep_privacy/tools/inspect_dataset.py
deleted file mode 100644
index 14a19d83257c17f9e810253cc5414cc5a48b308b..0000000000000000000000000000000000000000
--- a/deep_privacy/tools/inspect_dataset.py
+++ /dev/null
@@ -1,52 +0,0 @@
-import cv2
-import torch
-import click
-import tops
-from tops.config import LazyConfig, instantiate
-from dp2 import utils
-from dp2.utils import vis_utils
-import numpy as np
-from PIL import Image
-
-def get_image(batch, cfg, fscale_vis):
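- # Build a horizontal grid of the condition, target and batch-visualization images.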
- im0 = batch["condition"]
- im1 = batch["img"]
- im = utils.denormalize_img(torch.cat((im0, im1), dim=-1)).mul(255).byte()
- im = torch.cat((im, vis_utils.visualize_batch(**batch)), dim=-1)
-
- im = utils.im2numpy(im)
-
- im = tops.np_make_image_grid(im, nrow=len(im0))
- if fscale_vis != 1:
- new_shape = [int(_*fscale_vis) for _ in im.shape[:2][::-1]]
- im = np.array(Image.fromarray(im).resize(new_shape))
- return im
-
-
-@click.command()
-@click.argument("config_path")
-@click.option("--train", default=False, is_flag=True)
-@click.option("-n", "--num_images", default=8, type=int)
-@click.option("-f", "--fscale_vis", default=1)
-def main(config_path: str, train: bool, num_images: int, fscale_vis):
- cfg = LazyConfig.load(config_path)
- if train:
- dl_cfg = cfg.data.train.loader
- else:
- dl_cfg = cfg.data.val.loader
- dl_cfg.batch_size = num_images
- dl = instantiate(dl_cfg)
- print(dl.image_gpu_transform)
- dl = iter(dl)
-
- while True:
- batch = next(dl)
- im = get_image(batch, cfg, fscale_vis)
- cv2.imshow("", im[:, :, ::-1])
- key = cv2.waitKey(0)
- if key == ord("q"):
- exit()
-
-
-if __name__ == "__main__":
- main()
\ No newline at end of file
diff --git a/deep_privacy/tools/show_examples.py b/deep_privacy/tools/show_examples.py
deleted file mode 100644
index 423aeb2b3a75ff6b66166b9adcf7c33f4cdaff72..0000000000000000000000000000000000000000
--- a/deep_privacy/tools/show_examples.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import cv2
-import torch
-import numpy as np
-import click
-import tops
-import tqdm
-from tops.config import instantiate
-from PIL import Image
-from dp2 import utils, infer
-from dp2.utils import vis_utils
-from torchvision.transforms.functional import resize
-
-
-@torch.no_grad()
-@torch.cuda.amp.autocast()
-def get_im(dl, G, num_images, num_z, fscale_vis, truncation_value: float, b_idx, multi_modal_truncation, show_lowres: bool):
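- # For each image: show the real image, the condition, the batch visualization, and num_z sampled outputs.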
- ims = []
- G.update_w()
- for im_idx in tqdm.trange(num_images, desc="Sampling images"):
- batch = next(dl)
- ims.append(utils.im2numpy(batch["img"], True, True)[0])
- ims.append(utils.im2numpy(batch["condition"], True, True)[0])
- ims.append(utils.im2numpy(vis_utils.visualize_batch(**batch))[0])
- for z_idx in range(num_z):
- # Fix the seed per z_idx so the same Z is sampled for every image in the grid.
- tops.set_seed(b_idx*num_z + z_idx)
- if multi_modal_truncation and z_idx > 0:
- fake = G.multi_modal_truncate(**batch, truncation_value=0, w_indices=[z_idx-1])
- else:
- fake = G.sample(**batch, truncation_value=truncation_value)
- if "x_lowres" in fake and show_lowres:
- for x in fake["x_lowres"]:
- x = resize(x, fake["img"].shape[-2:])
- ims.append(utils.im2numpy(x, to_uint8=True, denormalize=True)[0])
- ims.append(utils.im2numpy(fake["img"], to_uint8=True, denormalize=True)[0])
- if fscale_vis != 1:
- new_shape = [int(_*fscale_vis) for _ in ims[0].shape[:2][::-1]]
- ims = [np.array(Image.fromarray(im).resize(new_shape)) for im in ims]
- im = tops.np_make_image_grid(ims, nrow=num_images)
- return im
-
-
-@click.command()
-@click.argument("config_path")
-@click.option("-n", "--num_images", default=8)
-@click.option("--num_z", "--nz", default=8)
-@click.option("-f", "--fscale_vis", default=1, type=float, help="Scale the output image resultion")
-@click.option("-t", "--truncation_value", default=None, type=float)
-@click.option("-l", "--show-lowres", default=False, is_flag=True)
-@click.option("--save", default=False, is_flag=True)
-@click.option("--train", default=False, is_flag=True)
-@click.option("--multi-modal-truncation", "--mt", default=False, is_flag=True)
-def show_samples(
- config_path: str,
- save: bool,
- train: bool,
- **kwargs):
- tops.set_seed(1)
- cfg = utils.load_config(config_path)
- G = infer.build_trained_generator(cfg)
- cfg.train.batch_size = 1
- if train:
- dl_val = cfg.data.train.loader
- else:
- dl_val = cfg.data.val.loader
- dl_val.num_workers = 1
- dl_val.shuffle = False
- dl_val.infinite = False
- tops.set_seed(1)
- dl_val = iter(instantiate(dl_val))
- b_idx = 0
- im = get_im(dl_val, G, b_idx=b_idx, **kwargs)
- print("Press 'a' for next image, 'q' to quit.")
- while True:
- b_idx += 1
- cv2.imshow("image", im[:, :, ::-1])
- if save:
- cv2.imwrite("test.png", im[:, :, ::-1])
- print("Saved file to test.png")
- key = cv2.waitKey(0)
- if key == ord("q"):
- break
- if key == ord("a"):
- im = get_im(dl_val, G, b_idx=b_idx, **kwargs)
-
-if __name__ == "__main__":
- show_samples()
\ No newline at end of file
diff --git a/deep_privacy/train.py b/deep_privacy/train.py
deleted file mode 100644
index b75d408b28a078aa0f77370ce6bba4b428998044..0000000000000000000000000000000000000000
--- a/deep_privacy/train.py
+++ /dev/null
@@ -1,190 +0,0 @@
-import tempfile
-import click
-import tops
-import warnings
-import traceback
-import torch
-import os
-from tops import checkpointer
-from sg3_torch_utils.ops import conv2d_gradfix, grid_sample_gradfix, bias_act, upfirdn2d
-from tops.config import instantiate
-from tops import logger
-from dp2 import utils, infer
-from dp2.gan_trainer import GANTrainer
-
-
-torch.backends.cudnn.benchmark = True
-
-
-def start_train(rank, world_size, debug, cfg_path, temp_dir, benchmark: bool):
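- # Per-process training entry point; spawned once per GPU when world_size > 1.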
- print(rank, world_size)
- cfg = utils.load_config(cfg_path)
- if debug:
- torch.backends.cudnn.benchmark = False
- torch.backends.cudnn.deterministic = True
- torch.set_printoptions(precision=10)
- else:
- torch.backends.cuda.matmul.allow_tf32 = True
- torch.backends.cudnn.allow_tf32 = True
- conv2d_gradfix.enabled = cfg.train.conv2d_gradfix_enabled
- grid_sample_gradfix.enabled = cfg.train.grid_sample_gradfix_enabled
- upfirdn2d.enabled = cfg.train.grid_sample_gradfix_enabled
- bias_act.enabled = cfg.train.bias_act_plugin_enabled
- if world_size > 1:
- init_file = os.path.abspath(os.path.join(temp_dir, ".torch_distributed_init"))
- init_method = f"file://{init_file}"
- torch.distributed.init_process_group(
- "nccl", rank=rank, world_size=world_size, init_method=init_method
- )
- # Set the device explicitly; otherwise pinned dataloader memory would be allocated on device:0 for every process.
- torch.cuda.set_device(tops.get_device())
-
- tops.set_AMP(cfg.train.amp.enabled)
- utils.init_tops(cfg)
- if tops.rank() == 0:
- utils.print_config(cfg)
- with open(cfg.output_dir.joinpath("config_path.py"), "w") as fp:
- fp.write(utils.config_to_str(cfg))
-
- if world_size > 1:
- assert cfg.train.batch_size > tops.world_size()
- assert cfg.train.batch_size % tops.world_size() == 0
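- # Split the global batch size evenly across GPUs.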
- cfg.train.batch_size //= world_size
- if rank != 0:
- warnings.filterwarnings("ignore", category=DeprecationWarning)
- warnings.filterwarnings("ignore", category=UserWarning)
- tops.set_seed(cfg.train.seed + rank)
- logger.log("Loading dataset.")
- dl_val = instantiate(cfg.data.val.loader, channels_last=cfg.train.channels_last)
- dl_train = instantiate(cfg.data.train.loader, channels_last=cfg.train.channels_last)
- dl_train = iter(dl_train)
-
- logger.log("Initializing models.")
- G = instantiate(cfg.generator)
- D = tops.to_cuda(instantiate(cfg.discriminator))
- if tops.rank() == 0:
- print(G)
- print(D)
-
- # TODO: EMA MIGHT NEED TO BE SYNCED ACROSS GPUs before instantiate
- G_EMA = utils.EMA(G, cfg.train.batch_size * world_size, **cfg.EMA)
- G = tops.to_cuda(G)
- if world_size > 1:
- logger.log("Syncing models accross GPUs")
- # Distributed is implemented self. # Buffers are never broadcasted during training.
- for module in [G_EMA, G, D]:
- params_and_buffers = list(module.named_parameters())
- params_and_buffers += list(module.named_buffers())
- for name, param in params_and_buffers:
- torch.distributed.broadcast(param, src=0)
- if cfg.train.compile_D.enabled:
- compile_kwargs = instantiate(cfg.train.compile_D)
- compile_kwargs.pop("enabled")
- D = torch.compile(D, **compile_kwargs)
- if cfg.train.compile_G.enabled:
- compile_kwargs = instantiate(cfg.train.compile_G)
- compile_kwargs.pop("enabled")
- G = torch.compile(G, **compile_kwargs)
- logger.log("Initializing optimizers")
- grad_scaler_D = instantiate(cfg.train.amp.scaler_D)
- grad_scaler_G = instantiate(cfg.train.amp.scaler_G)
-
- G_optim = instantiate(cfg.G_optim, params=G.parameters())
- D_optim = instantiate(cfg.D_optim, params=D.parameters())
-
- loss_fnc = instantiate(cfg.loss_fnc, D=D, G=G)
- logger.add_scalar("stats/gpu_batch_size", cfg.train.batch_size)
- logger.add_scalar("stats/ngpus", world_size)
-
- D.train()
- G.train()
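- # Optionally warm-start D and G from checkpoints trained with another config.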
- if hasattr(cfg.train, "discriminator_init_cfg") and not benchmark:
- cfg_ = utils.load_config(cfg.train.discriminator_init_cfg)
- ckpt = checkpointer.load_checkpoint(cfg_.checkpoint_dir)["discriminator"]
- if hasattr(cfg_, "ckpt_mapper_D"):
- ckpt = instantiate(cfg_.ckpt_mapper_D)(ckpt)
- D.load_state_dict(ckpt)
- if hasattr(cfg.train, "generator_init_cfg") and not benchmark:
- cfg_ = utils.load_config(cfg.train.generator_init_cfg)
- ckpt = checkpointer.load_checkpoint(cfg_.checkpoint_dir)["EMA_generator"]
- if hasattr(cfg_, "ckpt_mapper"):
- ckpt = instantiate(cfg_.ckpt_mapper)(ckpt)
- infer.load_state_dict(G, ckpt)
- infer.load_state_dict(G_EMA.generator, ckpt)
-
- G_EMA.eval()
- if cfg.train.channels_last:
- G = G.to(memory_format=torch.channels_last)
- D = D.to(memory_format=torch.channels_last)
-
- if tops.world_size() > 1:
- torch.distributed.barrier()
-
- trainer = GANTrainer(
- G=G,
- D=D,
- G_EMA=G_EMA,
- D_optim=D_optim,
- G_optim=G_optim,
- dl_train=dl_train,
- dl_val=dl_val,
- scaler_D=grad_scaler_D,
- scaler_G=grad_scaler_G,
- ims_per_log=cfg.train.ims_per_log,
- max_images_to_train=cfg.train.max_images_to_train,
- ims_per_val=cfg.train.ims_per_val,
- loss_handler=loss_fnc,
- evaluate_fn=instantiate(cfg.data.train_evaluation_fn),
- batch_size=cfg.train.batch_size,
- broadcast_buffers=cfg.train.broadcast_buffers,
- fp16_ddp_accumulate=cfg.train.fp16_ddp_accumulate,
- save_state=not benchmark
- )
- if benchmark:
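- # Benchmark mode: estimate training throughput, then tear down and exit.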
- trainer.estimate_ims_per_hour()
- if world_size > 1:
- torch.distributed.barrier()
- logger.finish()
- if world_size > 1:
- torch.distributed.destroy_process_group()
- return
-
- try:
- trainer.train_loop()
- except Exception:
- traceback.print_exc()
- exit()
- tops.set_AMP(False)
- tops.set_seed(0)
- metrics = instantiate(cfg.data.evaluation_fn)(generator=G_EMA, dataloader=dl_val)
- metrics = {f"metrics_final/{k}": v for k, v in metrics.items()}
- logger.add_dict(metrics, level=logger.logger.INFO)
- if world_size > 1:
- torch.distributed.barrier()
- logger.finish()
-
- if world_size > 1:
- torch.distributed.destroy_process_group()
-
-
-@click.command()
-@click.argument("config_path")
-@click.option("--debug", default=False, is_flag=True)
-@click.option("--benchmark", default=False, is_flag=True)
-def main(config_path: str, debug: bool, benchmark: bool):
- # Manually overriding world_size does not work; set the CUDA_VISIBLE_DEVICES environment variable instead.
- world_size = torch.cuda.device_count()
- if world_size > 1:
- torch.multiprocessing.set_start_method("spawn", force=True)
- with tempfile.TemporaryDirectory() as temp_dir:
- torch.multiprocessing.spawn(
- start_train,
- args=(world_size, debug, config_path, temp_dir, benchmark),
- nprocs=world_size,
- )
- else:
- start_train(0, 1, debug, config_path, None, benchmark)
-
-if __name__ == "__main__":
- main()
diff --git a/deep_privacy/validate.py b/deep_privacy/validate.py
deleted file mode 100644
index 099e0d1c7d9b4cca016967f9151eb04f0a9c2048..0000000000000000000000000000000000000000
--- a/deep_privacy/validate.py
+++ /dev/null
@@ -1,64 +0,0 @@
-import click
-import torch
-import os
-import tempfile
-from dp2.infer import build_trained_generator
-from tops.config import instantiate
-from dp2.utils import load_config
-import tops
-from tops import logger
-
-
-def validate(
- rank,
- config_path,
- batch_size: int,
- truncation_value: float,
- world_size,
- temp_dir,
- ):
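- # Evaluate a trained generator on the validation set, optionally across multiple GPUs.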
- tops.set_seed(0)
- tops.set_AMP(False)
- if world_size > 1:
- init_file = os.path.abspath(os.path.join(temp_dir, '.torch_distributed_init'))
- init_method = f'file://{init_file}'
- torch.distributed.init_process_group(
- "nccl", rank=rank, world_size=world_size, init_method=init_method)
- torch.cuda.set_device(tops.get_device())  # Otherwise pinned dataloader memory would be allocated on device:0 for every process.
- cfg = load_config(config_path)
-
- if batch_size is not None:
- assert batch_size % world_size == 0
- cfg.train.batch_size = batch_size // world_size
- dl_val = instantiate(cfg.data.val.loader)
- G = build_trained_generator(cfg)
- tops.set_seed(0)
- tops.set_AMP(False)
- metrics = instantiate(cfg.data.evaluation_fn)(generator=G, dataloader=dl_val, truncation_value=truncation_value)
- metrics = {f"metrics_final/{k}": v for k,v in metrics.items()}
- if rank == 0:
- tops.init(cfg.output_dir)
- logger.add_dict(metrics)
- logger.finish()
-
-
-@click.command()
-@click.argument("config_path")
-@click.option("--batch_size", default=16, type=int)
-@click.option("--truncation-value", default=None, type=float)
-def main(config_path, batch_size: int, truncation_value: float):
- world_size = torch.cuda.device_count()
- if world_size > 1:
- torch.multiprocessing.set_start_method("spawn", force=True)
- with tempfile.TemporaryDirectory() as temp_dir:
- torch.multiprocessing.spawn(validate,
- args=(config_path, batch_size, truncation_value, world_size, temp_dir),
- nprocs=world_size)
- else:
- validate(
- 0, config_path, batch_size, truncation_value,
- world_size=1, temp_dir=None)
-
-
-if __name__ == "__main__":
- main()
\ No newline at end of file
diff --git a/deep_privacy/dp2/__init__.py b/dp2/__init__.py
similarity index 100%
rename from deep_privacy/dp2/__init__.py
rename to dp2/__init__.py
diff --git a/deep_privacy/dp2/anonymizer/__init__.py b/dp2/anonymizer/__init__.py
similarity index 100%
rename from deep_privacy/dp2/anonymizer/__init__.py
rename to dp2/anonymizer/__init__.py
diff --git a/deep_privacy/dp2/anonymizer/anonymizer.py b/dp2/anonymizer/anonymizer.py
similarity index 100%
rename from deep_privacy/dp2/anonymizer/anonymizer.py
rename to dp2/anonymizer/anonymizer.py
diff --git a/deep_privacy/dp2/anonymizer/histogram_match_anonymizers.py b/dp2/anonymizer/histogram_match_anonymizers.py
similarity index 100%
rename from deep_privacy/dp2/anonymizer/histogram_match_anonymizers.py
rename to dp2/anonymizer/histogram_match_anonymizers.py
diff --git a/deep_privacy/dp2/data/__init__.py b/dp2/data/__init__.py
similarity index 100%
rename from deep_privacy/dp2/data/__init__.py
rename to dp2/data/__init__.py
diff --git a/deep_privacy/dp2/data/build.py b/dp2/data/build.py
similarity index 100%
rename from deep_privacy/dp2/data/build.py
rename to dp2/data/build.py
diff --git a/deep_privacy/dp2/data/datasets/__init__.py b/dp2/data/datasets/__init__.py
similarity index 100%
rename from deep_privacy/dp2/data/datasets/__init__.py
rename to dp2/data/datasets/__init__.py
diff --git a/deep_privacy/dp2/data/datasets/coco_cse.py b/dp2/data/datasets/coco_cse.py
similarity index 100%
rename from deep_privacy/dp2/data/datasets/coco_cse.py
rename to dp2/data/datasets/coco_cse.py
diff --git a/deep_privacy/dp2/data/datasets/fdf.py b/dp2/data/datasets/fdf.py
similarity index 100%
rename from deep_privacy/dp2/data/datasets/fdf.py
rename to dp2/data/datasets/fdf.py
diff --git a/deep_privacy/dp2/data/datasets/fdf128_wds.py b/dp2/data/datasets/fdf128_wds.py
similarity index 100%
rename from deep_privacy/dp2/data/datasets/fdf128_wds.py
rename to dp2/data/datasets/fdf128_wds.py
diff --git a/deep_privacy/dp2/data/datasets/fdh.py b/dp2/data/datasets/fdh.py
similarity index 100%
rename from deep_privacy/dp2/data/datasets/fdh.py
rename to dp2/data/datasets/fdh.py
diff --git a/deep_privacy/dp2/data/transforms/__init__.py b/dp2/data/transforms/__init__.py
similarity index 100%
rename from deep_privacy/dp2/data/transforms/__init__.py
rename to dp2/data/transforms/__init__.py
diff --git a/deep_privacy/dp2/data/transforms/functional.py b/dp2/data/transforms/functional.py
similarity index 100%
rename from deep_privacy/dp2/data/transforms/functional.py
rename to dp2/data/transforms/functional.py
diff --git a/deep_privacy/dp2/data/transforms/stylegan2_transform.py b/dp2/data/transforms/stylegan2_transform.py
similarity index 100%
rename from deep_privacy/dp2/data/transforms/stylegan2_transform.py
rename to dp2/data/transforms/stylegan2_transform.py
diff --git a/deep_privacy/dp2/data/transforms/transforms.py b/dp2/data/transforms/transforms.py
similarity index 100%
rename from deep_privacy/dp2/data/transforms/transforms.py
rename to dp2/data/transforms/transforms.py
diff --git a/deep_privacy/dp2/data/utils.py b/dp2/data/utils.py
similarity index 100%
rename from deep_privacy/dp2/data/utils.py
rename to dp2/data/utils.py
diff --git a/deep_privacy/dp2/detection/__init__.py b/dp2/detection/__init__.py
similarity index 100%
rename from deep_privacy/dp2/detection/__init__.py
rename to dp2/detection/__init__.py
diff --git a/deep_privacy/dp2/detection/base.py b/dp2/detection/base.py
similarity index 100%
rename from deep_privacy/dp2/detection/base.py
rename to dp2/detection/base.py
diff --git a/deep_privacy/dp2/detection/box_utils.py b/dp2/detection/box_utils.py
similarity index 100%
rename from deep_privacy/dp2/detection/box_utils.py
rename to dp2/detection/box_utils.py
diff --git a/deep_privacy/dp2/detection/box_utils_fdf.py b/dp2/detection/box_utils_fdf.py
similarity index 100%
rename from deep_privacy/dp2/detection/box_utils_fdf.py
rename to dp2/detection/box_utils_fdf.py
diff --git a/deep_privacy/dp2/detection/cse_mask_face_detector.py b/dp2/detection/cse_mask_face_detector.py
similarity index 100%
rename from deep_privacy/dp2/detection/cse_mask_face_detector.py
rename to dp2/detection/cse_mask_face_detector.py
diff --git a/deep_privacy/dp2/detection/deep_privacy1_detector.py b/dp2/detection/deep_privacy1_detector.py
similarity index 100%
rename from deep_privacy/dp2/detection/deep_privacy1_detector.py
rename to dp2/detection/deep_privacy1_detector.py
diff --git a/deep_privacy/dp2/detection/face_detector.py b/dp2/detection/face_detector.py
similarity index 100%
rename from deep_privacy/dp2/detection/face_detector.py
rename to dp2/detection/face_detector.py
diff --git a/deep_privacy/dp2/detection/models/__init__.py b/dp2/detection/models/__init__.py
similarity index 100%
rename from deep_privacy/dp2/detection/models/__init__.py
rename to dp2/detection/models/__init__.py
diff --git a/deep_privacy/dp2/detection/models/cse.py b/dp2/detection/models/cse.py
similarity index 100%
rename from deep_privacy/dp2/detection/models/cse.py
rename to dp2/detection/models/cse.py
diff --git a/deep_privacy/dp2/detection/models/keypoint_maskrcnn.py b/dp2/detection/models/keypoint_maskrcnn.py
similarity index 100%
rename from deep_privacy/dp2/detection/models/keypoint_maskrcnn.py
rename to dp2/detection/models/keypoint_maskrcnn.py
diff --git a/deep_privacy/dp2/detection/models/mask_rcnn.py b/dp2/detection/models/mask_rcnn.py
similarity index 100%
rename from deep_privacy/dp2/detection/models/mask_rcnn.py
rename to dp2/detection/models/mask_rcnn.py
diff --git a/deep_privacy/dp2/detection/models/vit_pose/backbone.py b/dp2/detection/models/vit_pose/backbone.py
similarity index 100%
rename from deep_privacy/dp2/detection/models/vit_pose/backbone.py
rename to dp2/detection/models/vit_pose/backbone.py
diff --git a/deep_privacy/dp2/detection/models/vit_pose/topdown_heatmap_simple_head.py b/dp2/detection/models/vit_pose/topdown_heatmap_simple_head.py
similarity index 100%
rename from deep_privacy/dp2/detection/models/vit_pose/topdown_heatmap_simple_head.py
rename to dp2/detection/models/vit_pose/topdown_heatmap_simple_head.py
diff --git a/deep_privacy/dp2/detection/models/vit_pose/vit_pose.py b/dp2/detection/models/vit_pose/vit_pose.py
similarity index 100%
rename from deep_privacy/dp2/detection/models/vit_pose/vit_pose.py
rename to dp2/detection/models/vit_pose/vit_pose.py
diff --git a/deep_privacy/dp2/detection/models/vit_pose_maskrcnn.py b/dp2/detection/models/vit_pose_maskrcnn.py
similarity index 100%
rename from deep_privacy/dp2/detection/models/vit_pose_maskrcnn.py
rename to dp2/detection/models/vit_pose_maskrcnn.py
diff --git a/deep_privacy/dp2/detection/person_detector.py b/dp2/detection/person_detector.py
similarity index 100%
rename from deep_privacy/dp2/detection/person_detector.py
rename to dp2/detection/person_detector.py
diff --git a/deep_privacy/dp2/detection/structures.py b/dp2/detection/structures.py
similarity index 100%
rename from deep_privacy/dp2/detection/structures.py
rename to dp2/detection/structures.py
diff --git a/deep_privacy/dp2/detection/utils.py b/dp2/detection/utils.py
similarity index 100%
rename from deep_privacy/dp2/detection/utils.py
rename to dp2/detection/utils.py
diff --git a/deep_privacy/dp2/discriminator/__init__.py b/dp2/discriminator/__init__.py
similarity index 100%
rename from deep_privacy/dp2/discriminator/__init__.py
rename to dp2/discriminator/__init__.py
diff --git a/deep_privacy/dp2/discriminator/sg2_discriminator.py b/dp2/discriminator/sg2_discriminator.py
similarity index 100%
rename from deep_privacy/dp2/discriminator/sg2_discriminator.py
rename to dp2/discriminator/sg2_discriminator.py
diff --git a/deep_privacy/dp2/gan_trainer.py b/dp2/gan_trainer.py
similarity index 100%
rename from deep_privacy/dp2/gan_trainer.py
rename to dp2/gan_trainer.py
diff --git a/deep_privacy/dp2/generator/__init__.py b/dp2/generator/__init__.py
similarity index 100%
rename from deep_privacy/dp2/generator/__init__.py
rename to dp2/generator/__init__.py
diff --git a/deep_privacy/dp2/generator/base.py b/dp2/generator/base.py
similarity index 100%
rename from deep_privacy/dp2/generator/base.py
rename to dp2/generator/base.py
diff --git a/deep_privacy/dp2/generator/deep_privacy1.py b/dp2/generator/deep_privacy1.py
similarity index 100%
rename from deep_privacy/dp2/generator/deep_privacy1.py
rename to dp2/generator/deep_privacy1.py
diff --git a/deep_privacy/dp2/generator/dummy_generators.py b/dp2/generator/dummy_generators.py
similarity index 100%
rename from deep_privacy/dp2/generator/dummy_generators.py
rename to dp2/generator/dummy_generators.py
diff --git a/deep_privacy/dp2/generator/stylegan_unet.py b/dp2/generator/stylegan_unet.py
similarity index 100%
rename from deep_privacy/dp2/generator/stylegan_unet.py
rename to dp2/generator/stylegan_unet.py
diff --git a/deep_privacy/dp2/generator/utils.py b/dp2/generator/utils.py
similarity index 100%
rename from deep_privacy/dp2/generator/utils.py
rename to dp2/generator/utils.py
diff --git a/deep_privacy/dp2/infer.py b/dp2/infer.py
similarity index 100%
rename from deep_privacy/dp2/infer.py
rename to dp2/infer.py
diff --git a/deep_privacy/dp2/layers/__init__.py b/dp2/layers/__init__.py
similarity index 100%
rename from deep_privacy/dp2/layers/__init__.py
rename to dp2/layers/__init__.py
diff --git a/deep_privacy/dp2/layers/sg2_layers.py b/dp2/layers/sg2_layers.py
similarity index 100%
rename from deep_privacy/dp2/layers/sg2_layers.py
rename to dp2/layers/sg2_layers.py
diff --git a/deep_privacy/dp2/loss/__init__.py b/dp2/loss/__init__.py
similarity index 100%
rename from deep_privacy/dp2/loss/__init__.py
rename to dp2/loss/__init__.py
diff --git a/deep_privacy/dp2/loss/pl_regularization.py b/dp2/loss/pl_regularization.py
similarity index 100%
rename from deep_privacy/dp2/loss/pl_regularization.py
rename to dp2/loss/pl_regularization.py
diff --git a/deep_privacy/dp2/loss/r1_regularization.py b/dp2/loss/r1_regularization.py
similarity index 100%
rename from deep_privacy/dp2/loss/r1_regularization.py
rename to dp2/loss/r1_regularization.py
diff --git a/deep_privacy/dp2/loss/sg2_loss.py b/dp2/loss/sg2_loss.py
similarity index 100%
rename from deep_privacy/dp2/loss/sg2_loss.py
rename to dp2/loss/sg2_loss.py
diff --git a/deep_privacy/dp2/loss/utils.py b/dp2/loss/utils.py
similarity index 100%
rename from deep_privacy/dp2/loss/utils.py
rename to dp2/loss/utils.py
diff --git a/deep_privacy/dp2/metrics/__init__.py b/dp2/metrics/__init__.py
similarity index 100%
rename from deep_privacy/dp2/metrics/__init__.py
rename to dp2/metrics/__init__.py
diff --git a/deep_privacy/dp2/metrics/fid.py b/dp2/metrics/fid.py
similarity index 100%
rename from deep_privacy/dp2/metrics/fid.py
rename to dp2/metrics/fid.py
diff --git a/deep_privacy/dp2/metrics/fid_clip.py b/dp2/metrics/fid_clip.py
similarity index 100%
rename from deep_privacy/dp2/metrics/fid_clip.py
rename to dp2/metrics/fid_clip.py
diff --git a/deep_privacy/dp2/metrics/lpips.py b/dp2/metrics/lpips.py
similarity index 100%
rename from deep_privacy/dp2/metrics/lpips.py
rename to dp2/metrics/lpips.py
diff --git a/deep_privacy/dp2/metrics/ppl.py b/dp2/metrics/ppl.py
similarity index 100%
rename from deep_privacy/dp2/metrics/ppl.py
rename to dp2/metrics/ppl.py
diff --git a/deep_privacy/dp2/metrics/torch_metrics.py b/dp2/metrics/torch_metrics.py
similarity index 100%
rename from deep_privacy/dp2/metrics/torch_metrics.py
rename to dp2/metrics/torch_metrics.py
diff --git a/deep_privacy/dp2/utils/__init__.py b/dp2/utils/__init__.py
similarity index 100%
rename from deep_privacy/dp2/utils/__init__.py
rename to dp2/utils/__init__.py
diff --git a/deep_privacy/dp2/utils/bufferless_video_capture.py b/dp2/utils/bufferless_video_capture.py
similarity index 100%
rename from deep_privacy/dp2/utils/bufferless_video_capture.py
rename to dp2/utils/bufferless_video_capture.py
diff --git a/deep_privacy/dp2/utils/cse.py b/dp2/utils/cse.py
similarity index 100%
rename from deep_privacy/dp2/utils/cse.py
rename to dp2/utils/cse.py
diff --git a/deep_privacy/dp2/utils/ema.py b/dp2/utils/ema.py
similarity index 100%
rename from deep_privacy/dp2/utils/ema.py
rename to dp2/utils/ema.py
diff --git a/deep_privacy/dp2/utils/torch_utils.py b/dp2/utils/torch_utils.py
similarity index 100%
rename from deep_privacy/dp2/utils/torch_utils.py
rename to dp2/utils/torch_utils.py
diff --git a/deep_privacy/dp2/utils/utils.py b/dp2/utils/utils.py
similarity index 100%
rename from deep_privacy/dp2/utils/utils.py
rename to dp2/utils/utils.py
diff --git a/deep_privacy/dp2/utils/vis_utils.py b/dp2/utils/vis_utils.py
similarity index 100%
rename from deep_privacy/dp2/utils/vis_utils.py
rename to dp2/utils/vis_utils.py
diff --git a/deep_privacy/gradio_demos/body_cse.py b/gradio_demos/body_cse.py
similarity index 100%
rename from deep_privacy/gradio_demos/body_cse.py
rename to gradio_demos/body_cse.py
diff --git a/deep_privacy/gradio_demos/face.py b/gradio_demos/face.py
similarity index 100%
rename from deep_privacy/gradio_demos/face.py
rename to gradio_demos/face.py
diff --git a/deep_privacy/gradio_demos/modules.py b/gradio_demos/modules.py
similarity index 100%
rename from deep_privacy/gradio_demos/modules.py
rename to gradio_demos/modules.py
diff --git a/media b/media
index e5608e46462b7ba4b810c8efe529bd2ac01a9b4d..572f8534e08ba73f99435e430d1d901eafdfac1b 120000
--- a/media
+++ b/media
@@ -1 +1 @@
-deep_privacy/media
\ No newline at end of file
+media2
\ No newline at end of file
diff --git a/deep_privacy/media/erling.jpg b/media2/erling.jpg
similarity index 100%
rename from deep_privacy/media/erling.jpg
rename to media2/erling.jpg
diff --git a/deep_privacy/media/g7_leaders.jpg b/media2/g7_leaders.jpg
similarity index 100%
rename from deep_privacy/media/g7_leaders.jpg
rename to media2/g7_leaders.jpg
diff --git a/deep_privacy/media/regjeringen.jpg b/media2/regjeringen.jpg
similarity index 100%
rename from deep_privacy/media/regjeringen.jpg
rename to media2/regjeringen.jpg
diff --git a/deep_privacy/media/stylemc_example.jpg b/media2/stylemc_example.jpg
similarity index 100%
rename from deep_privacy/media/stylemc_example.jpg
rename to media2/stylemc_example.jpg
diff --git a/deep_privacy/sg3_torch_utils/LICENSE.txt b/sg3_torch_utils/LICENSE.txt
similarity index 100%
rename from deep_privacy/sg3_torch_utils/LICENSE.txt
rename to sg3_torch_utils/LICENSE.txt
diff --git a/deep_privacy/sg3_torch_utils/__init__.py b/sg3_torch_utils/__init__.py
similarity index 100%
rename from deep_privacy/sg3_torch_utils/__init__.py
rename to sg3_torch_utils/__init__.py
diff --git a/deep_privacy/sg3_torch_utils/custom_ops.py b/sg3_torch_utils/custom_ops.py
similarity index 100%
rename from deep_privacy/sg3_torch_utils/custom_ops.py
rename to sg3_torch_utils/custom_ops.py
diff --git a/deep_privacy/sg3_torch_utils/misc.py b/sg3_torch_utils/misc.py
similarity index 100%
rename from deep_privacy/sg3_torch_utils/misc.py
rename to sg3_torch_utils/misc.py
diff --git a/deep_privacy/sg3_torch_utils/ops/__init__.py b/sg3_torch_utils/ops/__init__.py
similarity index 100%
rename from deep_privacy/sg3_torch_utils/ops/__init__.py
rename to sg3_torch_utils/ops/__init__.py
diff --git a/deep_privacy/sg3_torch_utils/ops/bias_act.cpp b/sg3_torch_utils/ops/bias_act.cpp
similarity index 100%
rename from deep_privacy/sg3_torch_utils/ops/bias_act.cpp
rename to sg3_torch_utils/ops/bias_act.cpp
diff --git a/deep_privacy/sg3_torch_utils/ops/bias_act.cu b/sg3_torch_utils/ops/bias_act.cu
similarity index 100%
rename from deep_privacy/sg3_torch_utils/ops/bias_act.cu
rename to sg3_torch_utils/ops/bias_act.cu
diff --git a/deep_privacy/sg3_torch_utils/ops/bias_act.h b/sg3_torch_utils/ops/bias_act.h
similarity index 100%
rename from deep_privacy/sg3_torch_utils/ops/bias_act.h
rename to sg3_torch_utils/ops/bias_act.h
diff --git a/deep_privacy/sg3_torch_utils/ops/bias_act.py b/sg3_torch_utils/ops/bias_act.py
similarity index 100%
rename from deep_privacy/sg3_torch_utils/ops/bias_act.py
rename to sg3_torch_utils/ops/bias_act.py
diff --git a/deep_privacy/sg3_torch_utils/ops/conv2d_gradfix.py b/sg3_torch_utils/ops/conv2d_gradfix.py
similarity index 100%
rename from deep_privacy/sg3_torch_utils/ops/conv2d_gradfix.py
rename to sg3_torch_utils/ops/conv2d_gradfix.py
diff --git a/deep_privacy/sg3_torch_utils/ops/conv2d_resample.py b/sg3_torch_utils/ops/conv2d_resample.py
similarity index 100%
rename from deep_privacy/sg3_torch_utils/ops/conv2d_resample.py
rename to sg3_torch_utils/ops/conv2d_resample.py
diff --git a/deep_privacy/sg3_torch_utils/ops/fma.py b/sg3_torch_utils/ops/fma.py
similarity index 100%
rename from deep_privacy/sg3_torch_utils/ops/fma.py
rename to sg3_torch_utils/ops/fma.py
diff --git a/deep_privacy/sg3_torch_utils/ops/grid_sample_gradfix.py b/sg3_torch_utils/ops/grid_sample_gradfix.py
similarity index 100%
rename from deep_privacy/sg3_torch_utils/ops/grid_sample_gradfix.py
rename to sg3_torch_utils/ops/grid_sample_gradfix.py
diff --git a/deep_privacy/sg3_torch_utils/ops/upfirdn2d.cpp b/sg3_torch_utils/ops/upfirdn2d.cpp
similarity index 100%
rename from deep_privacy/sg3_torch_utils/ops/upfirdn2d.cpp
rename to sg3_torch_utils/ops/upfirdn2d.cpp
diff --git a/deep_privacy/sg3_torch_utils/ops/upfirdn2d.cu b/sg3_torch_utils/ops/upfirdn2d.cu
similarity index 100%
rename from deep_privacy/sg3_torch_utils/ops/upfirdn2d.cu
rename to sg3_torch_utils/ops/upfirdn2d.cu
diff --git a/deep_privacy/sg3_torch_utils/ops/upfirdn2d.h b/sg3_torch_utils/ops/upfirdn2d.h
similarity index 100%
rename from deep_privacy/sg3_torch_utils/ops/upfirdn2d.h
rename to sg3_torch_utils/ops/upfirdn2d.h
diff --git a/deep_privacy/sg3_torch_utils/ops/upfirdn2d.py b/sg3_torch_utils/ops/upfirdn2d.py
similarity index 100%
rename from deep_privacy/sg3_torch_utils/ops/upfirdn2d.py
rename to sg3_torch_utils/ops/upfirdn2d.py